diff --git a/CHANGELOG.md b/CHANGELOG.md index 02d2cba3634..65376eeec22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ ## main / unreleased +* [FEATURE] Add ability to search ingesters for traces [#806](https://github.com/grafana/tempo/pull/806) (@mdisibio) * [BUGFIX] Update port spec for GCS docker-compose example [#869](https://github.com/grafana/tempo/pull/869) (@zalegrala) * [BUGFIX] Cortex upgrade to fix an issue where unhealthy compactors can't be forgotten [#878](https://github.com/grafana/tempo/pull/878) (@joe-elliott) * [ENHANCEMENT] Added "query blocks" cli option. [#876](https://github.com/grafana/tempo/pull/876) (@joe-elliott) diff --git a/Makefile b/Makefile index 8f237c6f869..df9dc7110aa 100644 --- a/Makefile +++ b/Makefile @@ -167,13 +167,18 @@ gen-proto: rm -rf $(PROTO_INTERMEDIATE_DIR) +.PHONY: gen-flat +gen-flat: + # -o /pkg generates into same folder as tempo.fbs for simpler imports. + docker run -v${PWD}:/opt/src neomantra/flatbuffers flatc --go -o /opt/src/pkg /opt/src/pkg/tempofb/tempo.fbs ### Check vendored files and generated proto .PHONY: vendor-check -vendor-check: gen-proto +vendor-check: gen-proto gen-flat go mod vendor go mod tidy -e - git diff --exit-code -- go.sum go.mod vendor/ pkg/tempopb/ + git diff --exit-code -- go.sum go.mod vendor/ pkg/tempopb/ pkg/tempofb/ + ### Release (intended to be used in the .github/workflows/images.yml) $(GORELEASER): diff --git a/cmd/tempo-cli/cmd-gen-index.go b/cmd/tempo-cli/cmd-gen-index.go index f36c4ad5f41..a9ebfba4adc 100644 --- a/cmd/tempo-cli/cmd-gen-index.go +++ b/cmd/tempo-cli/cmd-gen-index.go @@ -95,7 +95,7 @@ func VerifyIndex(indexReader common.IndexReader, dataReader common.DataReader) e } // read data file at record position - _, _, err = dataReader.Read(context.TODO(), []common.Record{*record}, nil) + _, _, err = dataReader.Read(context.TODO(), []common.Record{*record}, nil, nil) if err != nil { fmt.Println("index/data is corrupt, record/data mismatch") return err diff --git a/cmd/tempo-query/main.go b/cmd/tempo-query/main.go index 572e686d4cb..cddb4982e1a 100644 --- a/cmd/tempo-query/main.go +++ b/cmd/tempo-query/main.go @@ -5,15 +5,19 @@ import ( "io" "strings" - "github.com/spf13/viper" - - "github.com/grafana/tempo/cmd/tempo-query/tempo" "github.com/hashicorp/go-hclog" + hcplugin "github.com/hashicorp/go-plugin" "github.com/jaegertracing/jaeger/plugin/storage/grpc" "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" + otgrpc "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "github.com/spf13/viper" jaeger_config "github.com/uber/jaeger-client-go/config" + google_grpc "google.golang.org/grpc" + + "github.com/grafana/tempo/cmd/tempo-query/tempo" ) func main() { @@ -51,8 +55,13 @@ func main() { backend := tempo.New(cfg) plugin := &plugin{backend: backend} - grpc.Serve(&shared.PluginServices{ + grpc.ServeWithGRPCServer(&shared.PluginServices{ Store: plugin, + }, func(options []google_grpc.ServerOption) *google_grpc.Server { + return hcplugin.DefaultGRPCServer([]google_grpc.ServerOption{ + google_grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer())), + google_grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer())), + }) }) } diff --git a/cmd/tempo-query/tempo/plugin.go b/cmd/tempo-query/tempo/plugin.go index 0f0945893cc..65522a82ce0 100644 --- a/cmd/tempo-query/tempo/plugin.go 
+++ b/cmd/tempo-query/tempo/plugin.go @@ -5,8 +5,12 @@ import ( "fmt" "io" "net/http" + "net/url" + "strconv" "time" + "github.com/gogo/protobuf/jsonpb" + "github.com/grafana/tempo/pkg/tempopb" "github.com/opentracing/opentracing-go" ot_log "github.com/opentracing/opentracing-go/log" "github.com/weaveworks/common/user" @@ -25,48 +29,45 @@ const ( ProtobufTypeHeaderValue = "application/protobuf" ) +const ( + serviceSearchTag = "service.name" + operationSearchTag = "name" + minDurationSearchTag = "minDuration" + maxDurationSearchTag = "maxDuration" + numTracesSearchTag = "limit" +) + type Backend struct { - tempoEndpoint string + tempoBackend string } func New(cfg *Config) *Backend { return &Backend{ - tempoEndpoint: "http://" + cfg.Backend + "/api/traces/", + tempoBackend: cfg.Backend, } } func (b *Backend) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]jaeger.DependencyLink, error) { return nil, nil } + func (b *Backend) GetTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger.Trace, error) { - hexID := fmt.Sprintf("%016x%016x", traceID.High, traceID.Low) + url := fmt.Sprintf("http://%s/api/traces/%s", b.tempoBackend, traceID) - span, _ := opentracing.StartSpanFromContext(ctx, "GetTrace") + span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.GetTrace") defer span.Finish() - req, err := http.NewRequestWithContext(ctx, "GET", b.tempoEndpoint+hexID, nil) + req, err := b.newGetRequest(ctx, url, span) if err != nil { return nil, err } - if tracer := opentracing.GlobalTracer(); tracer != nil { - // this is not really loggable or anything we can react to. just ignoring this error - _ = tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)) - } - - // currently Jaeger Query will only propagate bearer token to the grpc backend and no other headers - // so we are going to extract the tenant id from the header, if it exists and use it - tenantID, found := extractBearerToken(ctx) - if found { - req.Header.Set(user.OrgIDHeaderName, tenantID) - } - - // Set content type to grpc + // Set Accept header to protobuf req.Header.Set(AcceptHeaderKey, ProtobufTypeHeaderValue) resp, err := http.DefaultClient.Do(req) if err != nil { - return nil, fmt.Errorf("failed get to tempo %w", err) + return nil, fmt.Errorf("failed GET to tempo %w", err) } defer resp.Body.Close() @@ -91,7 +92,7 @@ func (b *Backend) GetTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger jaegerBatches, err := ot_jaeger.InternalTracesToJaegerProto(otTrace) if err != nil { - return nil, fmt.Errorf("error translating to jaegerBatches %v: %w", hexID, err) + return nil, fmt.Errorf("error translating to jaegerBatches %v: %w", traceID, err) } jaegerTrace := &jaeger.Trace{ @@ -117,21 +118,186 @@ func (b *Backend) GetTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger } func (b *Backend) GetServices(ctx context.Context) ([]string, error) { - return nil, nil + span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.GetServices") + defer span.Finish() + + return b.lookupTagValues(ctx, span, serviceSearchTag) } + func (b *Backend) GetOperations(ctx context.Context, query jaeger_spanstore.OperationQueryParameters) ([]jaeger_spanstore.Operation, error) { - return nil, nil + span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.GetOperations") + defer span.Finish() + + tagValues, err := b.lookupTagValues(ctx, span, operationSearchTag) + if err != nil { + return nil, err + } + + var operations []jaeger_spanstore.Operation + for _,
value := range tagValues { + operations = append(operations, jaeger_spanstore.Operation{ + Name: value, + SpanKind: "", + }) + } + + return operations, nil + } + func (b *Backend) FindTraces(ctx context.Context, query *jaeger_spanstore.TraceQueryParameters) ([]*jaeger.Trace, error) { - return nil, nil + span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.FindTraces") + defer span.Finish() + + traceIDs, err := b.FindTraceIDs(ctx, query) + if err != nil { + return nil, err + } + + span.LogFields(ot_log.String("msg", fmt.Sprintf("Found %d trace IDs", len(traceIDs)))) + + // for every traceID, get the full trace + var jaegerTraces []*jaeger.Trace + for _, traceID := range traceIDs { + trace, err := b.GetTrace(ctx, traceID) + if err != nil { + // TODO this seems to be an internal inconsistency error, ignore so we can still show the rest + span.LogFields(ot_log.Error(fmt.Errorf("could not get trace for traceID %v: %w", traceID, err))) + continue + } + + jaegerTraces = append(jaegerTraces, trace) + } + + span.LogFields(ot_log.String("msg", fmt.Sprintf("Returning %d traces", len(jaegerTraces)))) + + return jaegerTraces, nil } + func (b *Backend) FindTraceIDs(ctx context.Context, query *jaeger_spanstore.TraceQueryParameters) ([]jaeger.TraceID, error) { - return nil, nil + span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.FindTraceIDs") + defer span.Finish() + + url := url.URL{ + Scheme: "http", + Host: b.tempoBackend, + Path: "api/search", + } + urlQuery := url.Query() + urlQuery.Set(serviceSearchTag, query.ServiceName) + urlQuery.Set(operationSearchTag, query.OperationName) + urlQuery.Set(minDurationSearchTag, query.DurationMin.String()) + urlQuery.Set(maxDurationSearchTag, query.DurationMax.String()) + urlQuery.Set(numTracesSearchTag, strconv.Itoa(query.NumTraces)) + for k, v := range query.Tags { + urlQuery.Set(k, v) + } + url.RawQuery = urlQuery.Encode() + + req, err := b.newGetRequest(ctx, url.String(), span) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed GET to tempo %w", err) + } + defer resp.Body.Close() + + // if search endpoint returns 404, search is most likely not enabled + if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading response from Tempo: got %s", resp.Status) + } + return nil, fmt.Errorf("%s", body) + } + + var searchResponse tempopb.SearchResponse + err = jsonpb.Unmarshal(resp.Body, &searchResponse) + if err != nil { + return nil, fmt.Errorf("error unmarshaling Tempo response: %w", err) + } + + jaegerTraceIDs := make([]jaeger.TraceID, len(searchResponse.Traces)) + + for i, traceMetadata := range searchResponse.Traces { + jaegerTraceID, err := jaeger.TraceIDFromString(traceMetadata.TraceID) + if err != nil { + return nil, fmt.Errorf("could not convert traceID into Jaeger's traceID %w", err) + } + jaegerTraceIDs[i] = jaegerTraceID + } + + return jaegerTraceIDs, nil } + +func (b *Backend) lookupTagValues(ctx context.Context, span opentracing.Span, tagName string) ([]string, error) { + url := fmt.Sprintf("http://%s/api/search/tag/%s/values", b.tempoBackend, tagName) + + req, err := b.newGetRequest(ctx, url, span) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed GET to tempo %w", err) + } + defer resp.Body.Close() + + // if search 
endpoint returns 404, search is most likely not enabled + if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading response from Tempo: got %s", resp.Status) + } + return nil, fmt.Errorf("%s", body) + } + + var searchLookupResponse tempopb.SearchTagValuesResponse + err = jsonpb.Unmarshal(resp.Body, &searchLookupResponse) + if err != nil { + return nil, fmt.Errorf("error unmarshaling Tempo response: %w", err) + } + + return searchLookupResponse.TagValues, nil +} + func (b *Backend) WriteSpan(ctx context.Context, span *jaeger.Span) error { return nil } +func (b *Backend) newGetRequest(ctx context.Context, url string, span opentracing.Span) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, err + } + + if tracer := opentracing.GlobalTracer(); tracer != nil { + // this is not really loggable or anything we can react to. just ignoring this error + _ = tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)) + } + + // currently Jaeger Query will only propagate bearer token to the grpc backend and no other headers + // so we are going to extract the tenant id from the header, if it exists and use it + tenantID, found := extractBearerToken(ctx) + if found { + req.Header.Set(user.OrgIDHeaderName, tenantID) + } + + return req, nil +} + func extractBearerToken(ctx context.Context) (string, bool) { if md, ok := metadata.FromIncomingContext(ctx); ok { values := md.Get(spanstore.BearerTokenKey) diff --git a/cmd/tempo/app/app.go b/cmd/tempo/app/app.go index c0f4d91509e..f89ce3edf31 100644 --- a/cmd/tempo/app/app.go +++ b/cmd/tempo/app/app.go @@ -46,6 +46,7 @@ type Config struct { Target string `yaml:"target,omitempty"` AuthEnabled bool `yaml:"auth_enabled,omitempty"` MultitenancyEnabled bool `yaml:"multitenancy_enabled,omitempty"` + SearchEnabled bool `yaml:"search_enabled,omitempty"` HTTPAPIPrefix string `yaml:"http_api_prefix"` UseOTelTracer bool `yaml:"use_otel_tracer,omitempty"` @@ -68,6 +69,7 @@ func (c *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { f.StringVar(&c.Target, "target", All, "target module") f.BoolVar(&c.AuthEnabled, "auth.enabled", false, "Set to true to enable auth (deprecated: use multitenancy.enabled)") f.BoolVar(&c.MultitenancyEnabled, "multitenancy.enabled", false, "Set to true to enable multitenancy.") + f.BoolVar(&c.SearchEnabled, "search.enabled", false, "Set to true to enable search (unstable).") f.StringVar(&c.HTTPAPIPrefix, "http-api-prefix", "", "String prefix for all http api endpoints.") f.BoolVar(&c.UseOTelTracer, "use-otel-tracer", false, "Set to true to replace the OpenTracing tracer with the OpenTelemetry tracer") diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 3ea4bcf1fa4..9022686bc76 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -47,8 +47,11 @@ const ( ) const ( - apiPathTraces string = "/api/traces/{traceID}" - apiPathEcho string = "/api/echo" + apiPathTraces string = "/api/traces/{traceID}" + apiPathSearch string = "/api/search" + apiPathSearchTags string = "/api/search/tags" + apiPathSearchTagValues string = "/api/search/tag/{tagName}/values" + apiPathEcho string = "/api/echo" ) func (t *App) initServer() (services.Service, error) { @@ -104,7 +107,7 @@ func (t *App) initOverrides() (services.Service, error) { func (t *App) 
initDistributor() (services.Service, error) { // todo: make ingester client a module instead of passing the config everywhere - distributor, err := distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring, t.overrides, t.cfg.MultitenancyIsEnabled(), t.cfg.Server.LogLevel) + distributor, err := distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring, t.overrides, t.cfg.MultitenancyIsEnabled(), t.cfg.Server.LogLevel, t.cfg.SearchEnabled) if err != nil { return nil, fmt.Errorf("failed to create distributor %w", err) } @@ -156,11 +159,24 @@ func (t *App) initQuerier() (services.Service, error) { } t.querier = querier - tracesHandler := middleware.Merge( + middleware := middleware.Merge( t.httpAuthMiddleware, - ).Wrap(http.HandlerFunc(t.querier.TraceByIDHandler)) + ) + tracesHandler := middleware.Wrap(http.HandlerFunc(t.querier.TraceByIDHandler)) t.Server.HTTP.Handle(path.Join("/querier", addHTTPAPIPrefix(&t.cfg, apiPathTraces)), tracesHandler) + + if t.cfg.SearchEnabled { + searchHandler := middleware.Wrap(http.HandlerFunc(t.querier.SearchHandler)) + t.Server.HTTP.Handle(path.Join("/querier", addHTTPAPIPrefix(&t.cfg, apiPathSearch)), searchHandler) + + searchTagsHandler := middleware.Wrap(http.HandlerFunc(t.querier.SearchTagsHandler)) + t.Server.HTTP.Handle(path.Join("/querier", addHTTPAPIPrefix(&t.cfg, apiPathSearchTags)), searchTagsHandler) + + searchTagValuesHandler := middleware.Wrap(http.HandlerFunc(t.querier.SearchTagValuesHandler)) + t.Server.HTTP.Handle(path.Join("/querier", addHTTPAPIPrefix(&t.cfg, apiPathSearchTagValues)), searchTagValuesHandler) + } + return t.querier, t.querier.CreateAndRegisterWorker(t.Server.HTTPServer.Handler) } @@ -169,31 +185,38 @@ func (t *App) initQueryFrontend() (services.Service, error) { return nil, fmt.Errorf("frontend query shards should be between %d and %d (both inclusive)", frontend.MinQueryShards, frontend.MaxQueryShards) } - var err error cortexTripper, v1, _, err := cortex_frontend.InitFrontend(t.cfg.Frontend.Config, frontend.CortexNoQuerierLimits{}, 0, log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } t.frontend = v1 - // custom tripperware that splits requests - shardingTripperWare, err := frontend.NewTripperware(t.cfg.Frontend, log.Logger, prometheus.DefaultRegisterer) + tripperware, err := frontend.NewTripperware(t.cfg.Frontend, t.cfg.HTTPAPIPrefix, log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } - shardingTripper := shardingTripperWare(cortexTripper) + roundTripper := tripperware(cortexTripper) - cortexHandler := cortex_transport.NewHandler(t.cfg.Frontend.Config.Handler, shardingTripper, log.Logger, prometheus.DefaultRegisterer) + frontendHandler := cortex_transport.NewHandler(t.cfg.Frontend.Config.Handler, roundTripper, log.Logger, prometheus.DefaultRegisterer) - tracesHandler := middleware.Merge( + frontendHandler = middleware.Merge( t.httpAuthMiddleware, - ).Wrap(cortexHandler) + ).Wrap(frontendHandler) // register grpc server for queriers to connect to cortex_frontend_v1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend) + // http query endpoint - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, apiPathTraces), tracesHandler) + t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, apiPathTraces), frontendHandler) + + // http search endpoints + if t.cfg.SearchEnabled { + t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, apiPathSearch), frontendHandler) + t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, apiPathSearchTags), frontendHandler) + t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, 
apiPathSearchTagValues), frontendHandler) + } + // http query echo endpoint t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, apiPathEcho), echoHandler()) return t.frontend, nil diff --git a/example/docker-compose/tempo-search/docker-compose.yaml b/example/docker-compose/tempo-search/docker-compose.yaml new file mode 100644 index 00000000000..aedf1c896d3 --- /dev/null +++ b/example/docker-compose/tempo-search/docker-compose.yaml @@ -0,0 +1,51 @@ +version: "3" +services: + + tempo: + image: grafana/tempo:latest + command: [ "-search.enabled=true", "-config.file=/etc/tempo.yaml" ] + volumes: + - ../local/tempo-local.yaml:/etc/tempo.yaml + - ./tempo-data/:/tmp/tempo + ports: + - "3200:3200" # tempo + - "14268" # jaeger ingest + + tempo-query: + image: grafana/tempo-query:latest + command: [ "--grpc-storage-plugin.configuration-file=/etc/tempo-query.yaml" ] + volumes: + - ./tempo-query.yaml:/etc/tempo-query.yaml + ports: + - "16686:16686" # jaeger-ui + depends_on: + - tempo + + synthetic-load-generator: + image: omnition/synthetic-load-generator:1.0.25 + volumes: + - ../shared/load-generator.json:/etc/load-generator.json + environment: + - TOPOLOGY_FILE=/etc/load-generator.json + - JAEGER_COLLECTOR_URL=http://tempo:14268 + depends_on: + - tempo + + prometheus: + image: prom/prometheus:latest + command: [ "--config.file=/etc/prometheus.yaml" ] + volumes: + - ../shared/prometheus.yaml:/etc/prometheus.yaml + ports: + - "9090:9090" + + grafana: + image: grafana/grafana:8.0.3 + volumes: + - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml + environment: + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + - GF_AUTH_DISABLE_LOGIN_FORM=true + ports: + - "3000:3000" \ No newline at end of file diff --git a/example/docker-compose/tempo-search/grafana-datasources.yaml b/example/docker-compose/tempo-search/grafana-datasources.yaml new file mode 100644 index 00000000000..66c7fdc5be8 --- /dev/null +++ b/example/docker-compose/tempo-search/grafana-datasources.yaml @@ -0,0 +1,34 @@ +apiVersion: 1 + +datasources: +- name: Prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://prometheus:9090 + basicAuth: false + isDefault: false + version: 1 + editable: false +- name: 'Tempo' + type: tempo + access: proxy + orgId: 1 + url: http://tempo:3200 + basicAuth: false + isDefault: false + version: 1 + editable: false + apiVersion: 1 + uid: tempo-query +- name: 'Jaeger (Tempo)' + type: jaeger + access: proxy + orgId: 1 + url: http://tempo-query:16686 + basicAuth: false + isDefault: true + version: 1 + editable: false + apiVersion: 1 + uid: jaeger-tempo \ No newline at end of file diff --git a/example/docker-compose/tempo-search/tempo-query.yaml b/example/docker-compose/tempo-search/tempo-query.yaml new file mode 100644 index 00000000000..cd6ee1bf023 --- /dev/null +++ b/example/docker-compose/tempo-search/tempo-query.yaml @@ -0,0 +1 @@ +backend: "tempo:3200" \ No newline at end of file diff --git a/go.mod b/go.mod index c395e320755..754c909d071 100644 --- a/go.mod +++ b/go.mod @@ -20,18 +20,20 @@ require ( github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.3 + github.com/google/flatbuffers v2.0.0+incompatible github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.2.0 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/go-hclog v0.14.0 - github.com/hashicorp/go-plugin v1.3.0 // indirect + github.com/hashicorp/go-plugin v1.3.0 github.com/jaegertracing/jaeger v1.21.0 github.com/jsternberg/zap-logfmt v1.2.0 github.com/klauspost/compress v1.13.1 github.com/minio/minio-go/v7 v7.0.10 github.com/olekukonko/tablewriter v0.0.2 + github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing/opentracing-go v1.2.0 github.com/pierrec/lz4/v4 v4.1.3 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index bc6648e2d97..58c00b541fc 100644 --- a/go.sum +++ b/go.sum @@ -895,6 +895,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= diff --git a/modules/distributor/distributor.go b/modules/distributor/distributor.go index 3dedee769c0..1b1456c9ea3 100644 --- a/modules/distributor/distributor.go +++ b/modules/distributor/distributor.go @@ -94,6 +94,7 @@ type Distributor struct { ingestersRing ring.ReadRing pool *ring_client.Pool DistributorRing *ring.Ring + searchEnabled bool // Per-user rate limiter. ingestionRateLimiter *limiter.RateLimiter @@ -104,7 +105,7 @@ type Distributor struct { } // New a distributor creates. 
-func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRing, o *overrides.Overrides, multitenancyEnabled bool, level logging.Level) (*Distributor, error) { +func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRing, o *overrides.Overrides, multitenancyEnabled bool, level logging.Level, searchEnabled bool) (*Distributor, error) { factory := cfg.factory if factory == nil { factory = func(addr string) (ring_client.PoolClient, error) { @@ -153,6 +154,7 @@ func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRi pool: pool, DistributorRing: distributorRing, ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), + searchEnabled: searchEnabled, } cfgReceivers := cfg.Receivers @@ -247,7 +249,12 @@ func (d *Distributor) Push(ctx context.Context, req *tempopb.PushRequest) (*temp return nil, err } - err = d.sendToIngestersViaBytes(ctx, userID, traces, keys, ids) + var searchData [][]byte + if d.searchEnabled { + searchData = extractSearchDataAll(traces, ids) + } + + err = d.sendToIngestersViaBytes(ctx, userID, traces, searchData, keys, ids) if err != nil { recordDiscaredSpans(err, userID, spanCount) } @@ -255,7 +263,7 @@ func (d *Distributor) Push(ctx context.Context, req *tempopb.PushRequest) (*temp return nil, err // PushRequest is ignored, so no reason to create one } -func (d *Distributor) sendToIngestersViaBytes(ctx context.Context, userID string, traces []*tempopb.Trace, keys []uint32, ids [][]byte) error { +func (d *Distributor) sendToIngestersViaBytes(ctx context.Context, userID string, traces []*tempopb.Trace, searchData [][]byte, keys []uint32, ids [][]byte) error { // Marshal to bytes once marshalledTraces := make([][]byte, len(traces)) for i, t := range traces { @@ -277,13 +285,19 @@ func (d *Distributor) sendToIngestersViaBytes(ctx context.Context, userID string localCtx = user.InjectOrgID(localCtx, userID) req := tempopb.PushBytesRequest{ - Traces: make([]tempopb.PreallocBytes, len(indexes)), - Ids: make([]tempopb.PreallocBytes, len(indexes)), + Traces: make([]tempopb.PreallocBytes, len(indexes)), + Ids: make([]tempopb.PreallocBytes, len(indexes)), + SearchData: make([]tempopb.PreallocBytes, len(indexes)), } for i, j := range indexes { req.Traces[i].Slice = marshalledTraces[j][0:] req.Ids[i].Slice = ids[j] + + // Search data optional + if len(searchData) > j { + req.SearchData[i].Slice = searchData[j] + } } c, err := d.pool.GetClientFor(ingester.Addr) diff --git a/modules/distributor/distributor_test.go b/modules/distributor/distributor_test.go index f135680725b..399b67dee41 100644 --- a/modules/distributor/distributor_test.go +++ b/modules/distributor/distributor_test.go @@ -428,7 +428,7 @@ func prepare(t *testing.T, limits *overrides.Limits, kvStore kv.Client) *Distrib l := logging.Level{} _ = l.Set("error") - d, err := New(distributorConfig, clientConfig, ingestersRing, overrides, true, l) + d, err := New(distributorConfig, clientConfig, ingestersRing, overrides, true, l, false) require.NoError(t, err) return d diff --git a/modules/distributor/search_data.go b/modules/distributor/search_data.go new file mode 100644 index 00000000000..2686165dcaf --- /dev/null +++ b/modules/distributor/search_data.go @@ -0,0 +1,107 @@ +package distributor + +import ( + "fmt" + "strconv" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + common_v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" + "github.com/grafana/tempo/tempodb/search" +) + +//
extractSearchDataAll returns flatbuffer search data for every trace. +func extractSearchDataAll(traces []*tempopb.Trace, ids [][]byte) [][]byte { + headers := make([][]byte, len(traces)) + + for i, t := range traces { + headers[i] = extractSearchData(t, ids[i]) + } + + return headers +} + +// extractSearchData returns the flatbuffer search data for the given trace. It is extracted here +// in the distributor because this is the only place on the ingest path where the trace is available +// in object form. +func extractSearchData(trace *tempopb.Trace, id []byte) []byte { + data := &tempofb.SearchEntryMutable{} + + data.TraceID = id + + for _, b := range trace.Batches { + // Batch attrs + if b.Resource != nil { + for _, a := range b.Resource.Attributes { + if s, ok := extractValueAsString(a.Value); ok { + data.AddTag(a.Key, s) + } + } + } + + for _, ils := range b.InstrumentationLibrarySpans { + for _, s := range ils.Spans { + + // Root span + if len(s.ParentSpanId) == 0 { + + data.AddTag(search.RootSpanNameTag, s.Name) + + // Span attrs + for _, a := range s.Attributes { + if s, ok := extractValueAsString(a.Value); ok { + data.AddTag(fmt.Sprint(search.RootSpanPrefix, a.Key), s) + } + } + + // Batch attrs + if b.Resource != nil { + for _, a := range b.Resource.Attributes { + if s, ok := extractValueAsString(a.Value); ok { + data.AddTag(fmt.Sprint(search.RootSpanPrefix, a.Key), s) + } + } + } + } + + // Collect for any spans + data.AddTag(search.SpanNameTag, s.Name) + data.SetStartTimeUnixNano(s.StartTimeUnixNano) + data.SetEndTimeUnixNano(s.EndTimeUnixNano) + + for _, a := range s.Attributes { + if s, ok := extractValueAsString(a.Value); ok { + data.AddTag(a.Key, s) + } + } + } + } + } + + return data.ToBytes() +} + +func extractValueAsString(v *common_v1.AnyValue) (s string, ok bool) { + vv := v.GetValue() + if vv == nil { + return "", false + } + + if s, ok := vv.(*common_v1.AnyValue_StringValue); ok { + return s.StringValue, true + } + + if b, ok := vv.(*common_v1.AnyValue_BoolValue); ok { + return strconv.FormatBool(b.BoolValue), true + } + + if i, ok := vv.(*common_v1.AnyValue_IntValue); ok { + return strconv.FormatInt(i.IntValue, 10), true + } + + if d, ok := vv.(*common_v1.AnyValue_DoubleValue); ok { + return strconv.FormatFloat(d.DoubleValue, 'g', -1, 64), true + } + + return "", false +} diff --git a/modules/distributor/search_data_test.go b/modules/distributor/search_data_test.go new file mode 100644 index 00000000000..ebb2927ad11 --- /dev/null +++ b/modules/distributor/search_data_test.go @@ -0,0 +1,84 @@ +package distributor + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + v1_common "github.com/grafana/tempo/pkg/tempopb/common/v1" + v1_resource "github.com/grafana/tempo/pkg/tempopb/resource/v1" + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/tempodb/search" +) + +func TestExtractSearchData(t *testing.T) { + traceIDA := []byte{0x0A, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F} + + testCases := []struct { + name string + trace *tempopb.Trace + id []byte + searchData *tempofb.SearchEntryMutable + }{ + { + name: "trace with root span", + trace: &tempopb.Trace{ + Batches: []*v1.ResourceSpans{ + { + Resource: &v1_resource.Resource{ + Attributes: []*v1_common.KeyValue{ + { + Key: "foo", + Value: &v1_common.AnyValue{ + Value: &v1_common.AnyValue_StringValue{StringValue: "bar"}, + }, + }, + { + Key: 
"service.name", + Value: &v1_common.AnyValue{ + Value: &v1_common.AnyValue_StringValue{StringValue: "baz"}, + }, + }, + }, + }, + InstrumentationLibrarySpans: []*v1.InstrumentationLibrarySpans{ + { + InstrumentationLibrary: &v1_common.InstrumentationLibrary{ + Name: "test", + }, + Spans: []*v1.Span{ + { + TraceId: traceIDA, + Name: "firstSpan", + }, + }, + }, + }, + }, + }, + }, + id: traceIDA, + searchData: &tempofb.SearchEntryMutable{ + TraceID: traceIDA, + Tags: tempofb.SearchDataMap{ + "foo": []string{"bar"}, + search.RootSpanPrefix + "foo": []string{"bar"}, + search.RootSpanNameTag: []string{"firstSpan"}, + search.SpanNameTag: []string{"firstSpan"}, + search.RootServiceNameTag: []string{"baz"}, + search.ServiceNameTag: []string{"baz"}, + }, + StartTimeUnixNano: 0, + EndTimeUnixNano: 0, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.searchData.ToBytes(), extractSearchData(tc.trace, tc.id)) + }) + } +} diff --git a/modules/frontend/frontend.go b/modules/frontend/frontend.go index e26a384a82f..dad3ddd0305 100644 --- a/modules/frontend/frontend.go +++ b/modules/frontend/frontend.go @@ -19,21 +19,133 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/user" "github.com/grafana/tempo/pkg/tempopb" "github.com/grafana/tempo/pkg/util" ) -// NewTripperware returns a Tripperware configured with a middleware to split requests -func NewTripperware(cfg Config, logger log.Logger, registerer prometheus.Registerer) (queryrange.Tripperware, error) { - level.Info(logger).Log("msg", "creating tripperware in query frontend to shard queries") +const ( + apiPathTraces = "/api/traces" + apiPathSearch = "/api/search" +) + +// NewTripperware returns a Tripperware configured with a middleware to route, split and dedupe requests. 
+func NewTripperware(cfg Config, apiPrefix string, logger log.Logger, registerer prometheus.Registerer) (queryrange.Tripperware, error) { + level.Info(logger).Log("msg", "creating tripperware in query frontend") + + tracesTripperware := NewTracesTripperware(cfg, logger, registerer) + searchTripperware := NewSearchTripperware() + + return func(next http.RoundTripper) http.RoundTripper { + traces := tracesTripperware(next) + search := searchTripperware(next) + + return newFrontendRoundTripper(apiPrefix, next, traces, search, logger, registerer) + }, nil +} + +type frontendRoundTripper struct { + apiPrefix string + next, traces, search http.RoundTripper + logger log.Logger + queriesPerTenant *prometheus.CounterVec +} + +func newFrontendRoundTripper(apiPrefix string, next, traces, search http.RoundTripper, logger log.Logger, registerer prometheus.Registerer) frontendRoundTripper { queriesPerTenant := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ Namespace: "tempo", Name: "query_frontend_queries_total", Help: "Total queries received per tenant.", }, []string{"tenant"}) + return frontendRoundTripper{ + apiPrefix: apiPrefix, + next: next, + traces: traces, + search: search, + logger: logger, + queriesPerTenant: queriesPerTenant, + } +} + +func (r frontendRoundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) { + start := time.Now() + // tracing instrumentation + span, ctx := opentracing.StartSpanFromContext(req.Context(), "frontend.Tripperware") + defer span.Finish() + + orgID, _ := user.ExtractOrgID(req.Context()) + r.queriesPerTenant.WithLabelValues(orgID).Inc() + span.SetTag("orgID", orgID) + + // for context propagation with traceID set + req = req.WithContext(ctx) + + // route the request to the appropriate RoundTripper + switch op := getOperation(r.apiPrefix, req.URL.Path); op { + case TracesOp: + resp, err = r.traces.RoundTrip(req) + case SearchOp: + resp, err = r.search.RoundTrip(req) + default: + // should never be called + level.Warn(r.logger).Log("msg", "unknown path called in frontend roundtripper", "path", req.URL.Path) + resp, err = r.next.RoundTrip(req) + } + + traceID, _ := middleware.ExtractTraceID(ctx) + statusCode := 500 + var contentLength int64 = 0 + if resp != nil { + statusCode = resp.StatusCode + contentLength = resp.ContentLength + } else if httpResp, ok := httpgrpc.HTTPResponseFromError(err); ok { + statusCode = int(httpResp.Code) + contentLength = int64(len(httpResp.Body)) + } + + level.Info(r.logger).Log( + "tenant", orgID, + "method", req.Method, + "traceID", traceID, + "url", req.URL.RequestURI(), + "duration", time.Since(start).String(), + "response_size", contentLength, + "status", statusCode, + ) + + return +} + +type RequestOp string + +const ( + TracesOp RequestOp = "traces" + SearchOp RequestOp = "search" +) + +func getOperation(prefix, path string) RequestOp { + if !strings.HasPrefix(path, prefix) { + return "" + } + + // remove prefix from path + path = path[len(prefix):] + + switch { + case strings.HasPrefix(path, apiPathTraces): + return TracesOp + case strings.HasPrefix(path, apiPathSearch): + return SearchOp + default: + return "" + } +} + +// NewTracesTripperware creates a new frontend tripperware responsible for handling get traces requests. 
+func NewTracesTripperware(cfg Config, logger log.Logger, registerer prometheus.Registerer) func(next http.RoundTripper) http.RoundTripper { return func(next http.RoundTripper) http.RoundTripper { // We're constructing middleware in this statement, each middleware wraps the next one from left-to-right // - the Deduper dedupes Span IDs for Zipkin support @@ -42,14 +154,8 @@ func NewTripperware(cfg Config, logger log.Logger, registerer prometheus.Registe rt := NewRoundTripper(next, Deduper(logger), ShardingWare(cfg.QueryShards, logger), RetryWare(cfg.MaxRetries, registerer)) return queryrange.RoundTripFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - // tracing instrumentation - span, ctx := opentracing.StartSpanFromContext(r.Context(), "frontend.Tripperware") - defer span.Finish() - - orgID, _ := user.ExtractOrgID(r.Context()) - queriesPerTenant.WithLabelValues(orgID).Inc() - span.SetTag("orgID", orgID) + // don't start a new span, this is already handled by frontendRoundTripper + span := opentracing.SpanFromContext(r.Context()) // validate traceID _, err := util.ParseTraceID(r) @@ -68,9 +174,6 @@ func NewTripperware(cfg Config, logger log.Logger, registerer prometheus.Registe marshallingFormat = util.ProtobufTypeHeaderValue } - // for context propagation with traceID set - r = r.WithContext(ctx) - // Enforce all communication internal to Tempo to be in protobuf bytes r.Header.Set(util.AcceptHeaderKey, util.ProtobufTypeHeaderValue) @@ -97,79 +200,25 @@ func NewTripperware(cfg Config, logger log.Logger, registerer prometheus.Registe } resp.Body = ioutil.NopCloser(bytes.NewReader(jsonTrace.Bytes())) } - span.SetTag("response marshalling format", marshallingFormat) - traceID, _ := util.ExtractTraceID(ctx) - statusCode := 500 - var contentLength int64 = 0 - if resp != nil { - statusCode = resp.StatusCode - contentLength = resp.ContentLength - } else if httpResp, ok := httpgrpc.HTTPResponseFromError(err); ok { - statusCode = int(httpResp.Code) - contentLength = int64(len(httpResp.Body)) - } - - level.Info(logger).Log( - "tenant", orgID, - "method", r.Method, - "traceID", traceID, - "url", r.URL.RequestURI(), - "duration", time.Since(start).String(), - "response_size", contentLength, - "status", statusCode, - ) - return resp, err }) - }, nil -} - -type Handler interface { - Do(*http.Request) (*http.Response, error) -} - -type Middleware interface { - Wrap(Handler) Handler + } } -// MiddlewareFunc is like http.HandlerFunc, but for Middleware. -type MiddlewareFunc func(Handler) Handler +// NewSearchTripperware creates a new frontend tripperware to handle search and search tags requests. +func NewSearchTripperware() queryrange.Tripperware { + return func(rt http.RoundTripper) http.RoundTripper { + return queryrange.RoundTripFunc(func(r *http.Request) (*http.Response, error) { + orgID, _ := user.ExtractOrgID(r.Context()) -// Wrap implements Middleware. 
-func (q MiddlewareFunc) Wrap(h Handler) Handler { - return q(h) -} + r.Header.Set(user.OrgIDHeaderName, orgID) + r.RequestURI = querierPrefix + r.RequestURI -func MergeMiddlewares(middleware ...Middleware) Middleware { - return MiddlewareFunc(func(next Handler) Handler { - for i := len(middleware) - 1; i >= 0; i-- { - next = middleware[i].Wrap(next) - } - return next - }) -} - -type roundTripper struct { - next http.RoundTripper - handler Handler -} + resp, err := rt.RoundTrip(r) -// NewRoundTripper merges a set of middlewares into an handler, then inject it into the `next` roundtripper -func NewRoundTripper(next http.RoundTripper, middlewares ...Middleware) http.RoundTripper { - transport := roundTripper{ - next: next, + return resp, err + }) } - transport.handler = MergeMiddlewares(middlewares...).Wrap(&transport) - return transport -} - -func (q roundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - return q.handler.Do(r) -} - -// Do implements Handler. -func (q roundTripper) Do(r *http.Request) (*http.Response, error) { - return q.next.RoundTrip(r) } diff --git a/modules/frontend/frontend_test.go b/modules/frontend/frontend_test.go new file mode 100644 index 00000000000..c9d832ba84e --- /dev/null +++ b/modules/frontend/frontend_test.go @@ -0,0 +1,101 @@ +package frontend + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" + "testing" + + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockNextTripperware struct{} + +func (s *mockNextTripperware) RoundTrip(_ *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(bytes.NewReader([]byte("next"))), + }, nil +} + +type mockTracesTripperware struct{} + +func (s *mockTracesTripperware) RoundTrip(_ *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(bytes.NewReader([]byte("traces"))), + }, nil +} + +type mockSearchTripperware struct{} + +func (s *mockSearchTripperware) RoundTrip(_ *http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(bytes.NewReader([]byte("search"))), + }, nil +} + +func TestFrontendRoundTripper(t *testing.T) { + next := &mockNextTripperware{} + traces := &mockTracesTripperware{} + search := &mockSearchTripperware{} + + testCases := []struct { + name string + apiPrefix string + endpoint string + response string + }{ + { + name: "next tripper", + apiPrefix: "", + endpoint: "/api/foo", + response: "next", + }, + { + name: "traces tripper", + apiPrefix: "", + endpoint: apiPathTraces + "/X", + response: "traces", + }, + { + name: "search tripper", + apiPrefix: "", + endpoint: apiPathSearch + "/X", + response: "search", + }, + { + name: "traces tripper with prefix", + apiPrefix: "/tempo", + endpoint: "/tempo" + apiPathTraces + "/X", + response: "traces", + }, + { + name: "next tripper with a misleading prefix", + apiPrefix: "/api/traces", + endpoint: "/api/traces" + apiPathSearch + "/api/traces", + response: "search", + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + frontendTripper := newFrontendRoundTripper(tt.apiPrefix, next, traces, search, log.NewNopLogger(), prometheus.NewRegistry()) + + req := &http.Request{ + URL: &url.URL{ + Path: tt.endpoint, + }, + } + resp, err := frontendTripper.RoundTrip(req) + require.NoError(t, err) + require.NotNil(t, resp) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.Equal(t, 
body, []byte(tt.response)) + }) + } +} diff --git a/modules/frontend/middleware.go b/modules/frontend/middleware.go new file mode 100644 index 00000000000..a1fc72c9a90 --- /dev/null +++ b/modules/frontend/middleware.go @@ -0,0 +1,51 @@ +package frontend + +import "net/http" + +type Handler interface { + Do(*http.Request) (*http.Response, error) +} + +type Middleware interface { + Wrap(Handler) Handler +} + +// MiddlewareFunc is like http.HandlerFunc, but for Middleware. +type MiddlewareFunc func(Handler) Handler + +// Wrap implements Middleware. +func (q MiddlewareFunc) Wrap(h Handler) Handler { + return q(h) +} + +func MergeMiddlewares(middleware ...Middleware) Middleware { + return MiddlewareFunc(func(next Handler) Handler { + for i := len(middleware) - 1; i >= 0; i-- { + next = middleware[i].Wrap(next) + } + return next + }) +} + +type roundTripper struct { + next http.RoundTripper + handler Handler +} + +// NewRoundTripper merges a set of middlewares into a handler, then injects it into the `next` roundtripper +func NewRoundTripper(next http.RoundTripper, middlewares ...Middleware) http.RoundTripper { + transport := roundTripper{ + next: next, + } + transport.handler = MergeMiddlewares(middlewares...).Wrap(&transport) + return transport +} + +func (q roundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + return q.handler.Do(r) +} + +// Do implements Handler. +func (q roundTripper) Do(r *http.Request) (*http.Response, error) { + return q.next.RoundTrip(r) +} diff --git a/modules/ingester/flush.go b/modules/ingester/flush.go index 505fc803119..0ebc03906be 100644 --- a/modules/ingester/flush.go +++ b/modules/ingester/flush.go @@ -170,6 +170,9 @@ func (i *Ingester) sweepInstance(instance *instance, immediate bool) { if err != nil { level.Error(log.WithUserID(instance.instanceID, log.Logger)).Log("msg", "failed to complete block", "err", err) } + + // periodically purge the tag cache; keep tags within the complete block timeout (i.e. data that is held locally) + instance.PurgeExpiredSearchTags(time.Now().Add(-i.cfg.CompleteBlockTimeout)) } func (i *Ingester) flushLoop(j int) { diff --git a/modules/ingester/ingester.go b/modules/ingester/ingester.go index eafd5831ebb..556a1521872 100644 --- a/modules/ingester/ingester.go +++ b/modules/ingester/ingester.go @@ -106,6 +106,9 @@ func (i *Ingester) starting(ctx context.Context) error { return fmt.Errorf("failed to rediscover local blocks %w", err) } + // Search data is considered experimental and removed on every startup. + i.clearSearchData() + // Now that user states have been created, we can start the lifecycler. // Important: we want to keep lifecycler running until we ask it to stop, so we need to give it independent context if err := i.lifecycler.StartAsync(context.Background()); err != nil { @@ -217,7 +220,14 @@ func (i *Ingester) PushBytes(ctx context.Context, req *tempopb.PushBytesRequest) // Unmarshal and push each trace for i := range req.Traces { + + // Search data is optional.
+ var searchData []byte + if len(req.SearchData) > i && len(req.SearchData[i].Slice) > 0 { + searchData = req.SearchData[i].Slice + } + + err := instance.PushBytes(ctx, req.Ids[i].Slice, req.Traces[i].Slice, searchData) if err != nil { return nil, err } diff --git a/modules/ingester/ingester_search.go b/modules/ingester/ingester_search.go new file mode 100644 index 00000000000..67548ea9109 --- /dev/null +++ b/modules/ingester/ingester_search.go @@ -0,0 +1,76 @@ +package ingester + +import ( + "context" + + "github.com/cortexproject/cortex/pkg/util/log" + "github.com/go-kit/kit/log/level" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/weaveworks/common/user" +) + +const searchDir = "search" + +func (i *Ingester) Search(ctx context.Context, req *tempopb.SearchRequest) (*tempopb.SearchResponse, error) { + instanceID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + inst, ok := i.getInstanceByID(instanceID) + if !ok || inst == nil { + return &tempopb.SearchResponse{}, nil + } + + res, err := inst.Search(ctx, req) + if err != nil { + return nil, err + } + + return res, nil +} + +func (i *Ingester) SearchTags(ctx context.Context, req *tempopb.SearchTagsRequest) (*tempopb.SearchTagsResponse, error) { + instanceID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + inst, ok := i.getInstanceByID(instanceID) + if !ok || inst == nil { + return &tempopb.SearchTagsResponse{}, nil + } + + tags := inst.GetSearchTags() + + resp := &tempopb.SearchTagsResponse{ + TagNames: tags, + } + + return resp, nil +} + +func (i *Ingester) SearchTagValues(ctx context.Context, req *tempopb.SearchTagValuesRequest) (*tempopb.SearchTagValuesResponse, error) { + instanceID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + inst, ok := i.getInstanceByID(instanceID) + if !ok || inst == nil { + return &tempopb.SearchTagValuesResponse{}, nil + } + + vals := inst.GetSearchTagValues(req.TagName) + + resp := &tempopb.SearchTagValuesResponse{ + TagValues: vals, + } + + return resp, nil +} + +func (i *Ingester) clearSearchData() { + // clear wal + err := i.store.WAL().ClearFolder(searchDir) + if err != nil { + level.Error(log.Logger).Log("msg", "error clearing search data from wal") + } +} diff --git a/modules/ingester/ingester_test.go b/modules/ingester/ingester_test.go index 7e0587c62a4..f220e907314 100644 --- a/modules/ingester/ingester_test.go +++ b/modules/ingester/ingester_test.go @@ -159,7 +159,7 @@ func TestDeprecatedPush(t *testing.T) { func TestWal(t *testing.T) { tmpDir, err := ioutil.TempDir("/tmp", "") - assert.NoError(t, err, "unexpected error getting tempdir") + require.NoError(t, err, "unexpected error getting tempdir") defer os.RemoveAll(tmpDir) ctx := user.InjectOrgID(context.Background(), "test") @@ -169,14 +169,14 @@ func TestWal(t *testing.T) { foundTrace, err := ingester.FindTraceByID(ctx, &tempopb.TraceByIDRequest{ TraceID: traceID, }) - assert.NoError(t, err, "unexpected error querying") - assert.Equal(t, foundTrace.Trace, traces[pos]) + require.NoError(t, err, "unexpected error querying") + require.Equal(t, foundTrace.Trace, traces[pos]) } // force cut all traces for _, instance := range ingester.instances { err := instance.CutCompleteTraces(0, true) - assert.NoError(t, err, "unexpected error cutting traces") + require.NoError(t, err, "unexpected error cutting traces") } // create new ingester. this should replay wal! 
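
As an illustrative aside (not part of the diff): the `PushBytes` hunk above relies on a convention that is easy to miss. `Traces`, `Ids`, and `SearchData` in `tempopb.PushBytesRequest` are parallel slices indexed together, and the entries of `SearchData` may be empty because the distributor only populates them when search is enabled. A minimal sketch of that guard, using a hypothetical stand-in struct rather than the generated `tempopb` types so it compiles on its own:

```go
package main

import "fmt"

// pushBytesRequest stands in for tempopb.PushBytesRequest: three parallel
// slices, where index i of each slice refers to the same trace. SearchData
// is optional and may be shorter than Traces/Ids or hold empty entries.
type pushBytesRequest struct {
	Traces     [][]byte
	Ids        [][]byte
	SearchData [][]byte
}

func main() {
	req := pushBytesRequest{
		Traces:     [][]byte{{0xA1}, {0xB2}},
		Ids:        [][]byte{{0x01}, {0x02}},
		SearchData: [][]byte{{0xFF}}, // only the first trace carries search data
	}

	for i := range req.Traces {
		// Mirror the ingester's guard: treat search data as optional per trace.
		var searchData []byte
		if len(req.SearchData) > i && len(req.SearchData[i]) > 0 {
			searchData = req.SearchData[i]
		}
		fmt.Printf("trace %x: %d byte(s) of search data\n", req.Ids[i], len(searchData))
	}
}
```

Because the receiving side guards on length, requests that carry no search data (older senders, or a distributor running without `-search.enabled=true`) are still accepted unchanged.
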
@@ -187,9 +187,9 @@ func TestWal(t *testing.T) { foundTrace, err := ingester.FindTraceByID(ctx, &tempopb.TraceByIDRequest{ TraceID: traceID, }) - assert.NoError(t, err, "unexpected error querying") + require.NoError(t, err, "unexpected error querying") equal := proto.Equal(traces[i], foundTrace.Trace) - assert.True(t, equal) + require.True(t, equal) } // a block that has been replayed should have a flush queue entry to complete it @@ -198,23 +198,23 @@ func TestWal(t *testing.T) { time.Sleep(100 * time.Millisecond) } - assert.Len(t, ingester.instances["test"].completingBlocks, 0) - assert.Len(t, ingester.instances["test"].completeBlocks, 1) + require.Len(t, ingester.instances["test"].completingBlocks, 0) + require.Len(t, ingester.instances["test"].completeBlocks, 1) // should be able to find old traces that were replayed for i, traceID := range traceIDs { foundTrace, err := ingester.FindTraceByID(ctx, &tempopb.TraceByIDRequest{ TraceID: traceID, }) - assert.NoError(t, err, "unexpected error querying") + require.NoError(t, err, "unexpected error querying") equal := proto.Equal(traces[i], foundTrace.Trace) - assert.True(t, equal) + require.True(t, equal) } } func TestFlush(t *testing.T) { tmpDir, err := ioutil.TempDir("/tmp", "") - assert.NoError(t, err, "unexpected error getting tempdir") + require.NoError(t, err, "unexpected error getting tempdir") defer os.RemoveAll(tmpDir) ctx := user.InjectOrgID(context.Background(), "test") @@ -224,13 +224,13 @@ func TestFlush(t *testing.T) { foundTrace, err := ingester.FindTraceByID(ctx, &tempopb.TraceByIDRequest{ TraceID: traceID, }) - assert.NoError(t, err, "unexpected error querying") - assert.Equal(t, foundTrace.Trace, traces[pos]) + require.NoError(t, err, "unexpected error querying") + require.Equal(t, foundTrace.Trace, traces[pos]) } // stopping the ingester should force cut all live traces to disk err = ingester.stopping(nil) - assert.NoError(t, err) + require.NoError(t, err) // create new ingester. this should replay wal! 
ingester, _, _ = defaultIngester(t, tmpDir) @@ -240,9 +240,9 @@ func TestFlush(t *testing.T) { foundTrace, err := ingester.FindTraceByID(ctx, &tempopb.TraceByIDRequest{ TraceID: traceID, }) - assert.NoError(t, err, "unexpected error querying") + require.NoError(t, err, "unexpected error querying") equal := proto.Equal(traces[i], foundTrace.Trace) - assert.True(t, equal) + require.True(t, equal) } } diff --git a/modules/ingester/instance.go b/modules/ingester/instance.go index 74aa91253ef..082f9a64787 100644 --- a/modules/ingester/instance.go +++ b/modules/ingester/instance.go @@ -30,6 +30,7 @@ import ( "github.com/grafana/tempo/tempodb/backend/local" "github.com/grafana/tempo/tempodb/encoding" "github.com/grafana/tempo/tempodb/encoding/common" + "github.com/grafana/tempo/tempodb/search" "github.com/grafana/tempo/tempodb/wal" ) @@ -66,6 +67,11 @@ type instance struct { completingBlocks []*wal.AppendBlock completeBlocks []*wal.LocalBlock + searchHeadBlock *searchStreamingBlockEntry + searchAppendBlocks map[*wal.AppendBlock]*searchStreamingBlockEntry + searchCompleteBlocks map[*wal.LocalBlock]*searchLocalBlockEntry + searchTagCache *search.TagCache + lastBlockCut time.Time instanceID string @@ -81,9 +87,22 @@ type instance struct { hash hash.Hash32 } +type searchStreamingBlockEntry struct { + b *search.StreamingSearchBlock + mtx sync.RWMutex +} + +type searchLocalBlockEntry struct { + b search.SearchableBlock + mtx sync.RWMutex +} + func newInstance(instanceID string, limiter *Limiter, writer tempodb.Writer, l *local.Backend) (*instance, error) { i := &instance{ - traces: map[uint32]*trace{}, + traces: map[uint32]*trace{}, + searchAppendBlocks: map[*wal.AppendBlock]*searchStreamingBlockEntry{}, + searchCompleteBlocks: map[*wal.LocalBlock]*searchLocalBlockEntry{}, + searchTagCache: search.NewTagCache(), instanceID: instanceID, tracesCreatedTotal: metricTracesCreatedTotal.WithLabelValues(instanceID), @@ -135,11 +154,11 @@ func (i *instance) Push(ctx context.Context, req *tempopb.PushRequest) error { } trace := i.getOrCreateTrace(id) - return trace.Push(ctx, buffer) + return trace.Push(ctx, i.instanceID, buffer, nil) } // PushBytes is used to push an unmarshalled tempopb.Trace to the instance -func (i *instance) PushBytes(ctx context.Context, id []byte, traceBytes []byte) error { +func (i *instance) PushBytes(ctx context.Context, id []byte, traceBytes []byte, searchData []byte) error { if !validation.ValidTraceID(id) { return status.Errorf(codes.InvalidArgument, "%s is not a valid traceid", hex.EncodeToString(id)) } @@ -150,11 +169,15 @@ func (i *instance) PushBytes(ctx context.Context, id []byte, traceBytes []byte) return status.Errorf(codes.FailedPrecondition, "%s max live traces per tenant exceeded: %v", overrides.ErrorPrefixLiveTracesExceeded, err) } + if searchData != nil { + i.RecordSearchLookupValues(searchData) + } + i.tracesMtx.Lock() defer i.tracesMtx.Unlock() trace := i.getOrCreateTrace(id) - return trace.Push(ctx, traceBytes) + return trace.Push(ctx, i.instanceID, traceBytes, searchData) } // Moves any complete traces out of the map to complete traces @@ -169,7 +192,7 @@ func (i *instance) CutCompleteTraces(cutoff time.Duration, immediate bool) error return err } - err = i.writeTraceToHeadBlock(t.traceID, out) + err = i.writeTraceToHeadBlock(t.traceID, out, t.searchData) if err != nil { return err } @@ -239,16 +262,38 @@ func (i *instance) CompleteBlock(blockID uuid.UUID) error { return errors.Wrap(err, "error creating ingester block") } + // Search data (optional) i.blocksMtx.Lock() 
- i.completeBlocks = append(i.completeBlocks, ingesterBlock) + oldSearch := i.searchAppendBlocks[completingBlock] i.blocksMtx.Unlock() + var newSearch search.SearchableBlock + if oldSearch != nil { + err = search.NewBackendSearchBlock(oldSearch.b, i.local, backendBlock.BlockMeta().BlockID, backendBlock.BlockMeta().TenantID, backend.EncSnappy, 0) + if err != nil { + return err + } + + newSearch = search.OpenBackendSearchBlock(i.local, backendBlock.BlockMeta().BlockID, backendBlock.BlockMeta().TenantID) + } + + i.blocksMtx.Lock() + defer i.blocksMtx.Unlock() + + if newSearch != nil { + i.searchCompleteBlocks[ingesterBlock] = &searchLocalBlockEntry{ + b: newSearch, + } + } + i.completeBlocks = append(i.completeBlocks, ingesterBlock) + return nil } -// nolint:interfacer func (i *instance) ClearCompletingBlock(blockID uuid.UUID) error { i.blocksMtx.Lock() + defer i.blocksMtx.Unlock() + var completingBlock *wal.AppendBlock for j, iterBlock := range i.completingBlocks { if iterBlock.BlockID() == blockID { @@ -257,13 +302,20 @@ func (i *instance) ClearCompletingBlock(blockID uuid.UUID) error { break } } - i.blocksMtx.Unlock() if completingBlock != nil { + entry := i.searchAppendBlocks[completingBlock] + if entry != nil { + entry.mtx.Lock() + defer entry.mtx.Unlock() + _ = entry.b.Clear() + delete(i.searchAppendBlocks, completingBlock) + } + return completingBlock.Clear() } - return fmt.Errorf("Error finding wal completingBlock to clear") + return errors.New("Error finding wal completingBlock to clear") } // GetBlockToBeFlushed gets a list of blocks that can be flushed to the backend @@ -294,6 +346,14 @@ func (i *instance) ClearFlushedBlocks(completeBlockTimeout time.Duration) error if flushedTime.Add(completeBlockTimeout).Before(time.Now()) { i.completeBlocks = append(i.completeBlocks[:idx], i.completeBlocks[idx+1:]...) 
+ + searchEntry := i.searchCompleteBlocks[b] + if searchEntry != nil { + searchEntry.mtx.Lock() + defer searchEntry.mtx.Unlock() + delete(i.searchCompleteBlocks, b) + } + err = i.local.ClearBlock(b.BlockMeta().BlockID, i.instanceID) if err == nil { metricBlocksClearedTotal.Inc() @@ -390,7 +450,8 @@ func (i *instance) getOrCreateTrace(traceID []byte) *trace { } maxBytes := i.limiter.limits.MaxBytesPerTrace(i.instanceID) - trace = newTrace(maxBytes, traceID) + maxSearchBytes := i.limiter.limits.MaxSearchBytesPerTrace(i.instanceID) + trace = newTrace(traceID, maxBytes, maxSearchBytes) i.traces[fp] = trace i.tracesCreatedTotal.Inc() i.traceCount.Inc() @@ -407,10 +468,33 @@ func (i *instance) tokenForTraceID(id []byte) uint32 { // resetHeadBlock() should be called under lock func (i *instance) resetHeadBlock() error { + oldHeadBlock := i.headBlock var err error - i.headBlock, err = i.writer.WAL().NewBlock(uuid.New(), i.instanceID, model.CurrentEncoding) + newHeadBlock, err := i.writer.WAL().NewBlock(uuid.New(), i.instanceID, model.CurrentEncoding) + if err != nil { + return err + } + + i.headBlock = newHeadBlock i.lastBlockCut = time.Now() - return err + + // Create search data wal file + f, err := i.writer.WAL().NewFile(i.headBlock.BlockID(), i.instanceID, searchDir, "searchdata") + if err != nil { + return err + } + + b, err := search.NewStreamingSearchBlockForFile(f) + if err != nil { + return err + } + if i.searchHeadBlock != nil { + i.searchAppendBlocks[oldHeadBlock] = i.searchHeadBlock + } + i.searchHeadBlock = &searchStreamingBlockEntry{ + b: b, + } + return nil } func (i *instance) tracesToCut(cutoff time.Duration, immediate bool) []*trace { @@ -431,11 +515,24 @@ func (i *instance) tracesToCut(cutoff time.Duration, immediate bool) []*trace { return tracesToCut } -func (i *instance) writeTraceToHeadBlock(id common.ID, b []byte) error { +func (i *instance) writeTraceToHeadBlock(id common.ID, b []byte, searchData [][]byte) error { i.blocksMtx.Lock() defer i.blocksMtx.Unlock() - return i.headBlock.Write(id, b) + err := i.headBlock.Write(id, b) + if err != nil { + return err + } + + entry := i.searchHeadBlock + if entry != nil { + entry.mtx.Lock() + err := entry.b.Append(context.TODO(), id, searchData) + entry.mtx.Unlock() + return err + } + + return nil } // pushRequestTraceID gets the TraceID of the first span in the batch and assumes its the trace ID throughout @@ -488,8 +585,11 @@ func (i *instance) rediscoverLocalBlocks(ctx context.Context) error { return err } + //sb := search.OpenBackendSearchBlock(i.local, b.BlockMeta().BlockID, b.BlockMeta().TenantID) + i.blocksMtx.Lock() i.completeBlocks = append(i.completeBlocks, ib) + //i.searchCompleteBlocks[ib] = sb i.blocksMtx.Unlock() level.Info(log.Logger).Log("msg", "reloaded local block", "tenantID", i.instanceID, "block", id.String(), "flushed", ib.FlushedTime()) diff --git a/modules/ingester/instance_search.go b/modules/ingester/instance_search.go new file mode 100644 index 00000000000..0159b33e4fe --- /dev/null +++ b/modules/ingester/instance_search.go @@ -0,0 +1,172 @@ +package ingester + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/tempodb/search" + "github.com/grafana/tempo/tempodb/wal" +) + +func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tempopb.SearchResponse, error) { + + maxResults := 20 + if req.Limit != 0 { + maxResults = int(req.Limit) + } + + p := search.NewSearchPipeline(req) + + sr 
:= search.NewResults() + defer sr.Close() + + i.searchLiveTraces(ctx, p, sr) + i.searchWAL(ctx, p, sr) + i.searchLocalBlocks(ctx, p, sr) + + sr.AllWorkersStarted() + + resultsMap := map[string]*tempopb.TraceSearchMetadata{} + + for result := range sr.Results() { + // Dedupe/combine results + if existing := resultsMap[result.TraceID]; existing != nil { + search.CombineSearchResults(existing, result) + } else { + resultsMap[result.TraceID] = result + } + + if len(resultsMap) >= maxResults { + break + } + } + + results := make([]*tempopb.TraceSearchMetadata, 0, len(resultsMap)) + for _, result := range resultsMap { + results = append(results, result) + } + + // Sort + sort.Slice(results, func(i, j int) bool { + return results[i].StartTimeUnixNano > results[j].StartTimeUnixNano + }) + + return &tempopb.SearchResponse{ + Traces: results, + Metrics: &tempopb.SearchMetrics{ + InspectedTraces: sr.TracesInspected(), + InspectedBytes: sr.BytesInspected(), + InspectedBlocks: sr.BlocksInspected(), + }, + }, nil +} + +func (i *instance) searchLiveTraces(ctx context.Context, p search.Pipeline, sr *search.Results) { + sr.StartWorker() + + go func() { + defer sr.FinishWorker() + + i.tracesMtx.Lock() + defer i.tracesMtx.Unlock() + + for _, t := range i.traces { + if sr.Quit() { + return + } + + sr.AddTraceInspected(1) + + var result *tempopb.TraceSearchMetadata + + // Search and combine from all segments for the trace. + for _, s := range t.searchData { + sr.AddBytesInspected(uint64(len(s))) + + entry := tempofb.SearchEntryFromBytes(s) + if p.Matches(entry) { + newResult := search.GetSearchResultFromData(entry) + if result != nil { + search.CombineSearchResults(result, newResult) + } else { + result = newResult + } + } + } + + if result != nil { + if quit := sr.AddResult(ctx, result); quit { + return + } + } + } + }() +} + +func (i *instance) searchWAL(ctx context.Context, p search.Pipeline, sr *search.Results) { + searchFunc := func(k *wal.AppendBlock, e *searchStreamingBlockEntry) { + defer sr.FinishWorker() + + e.mtx.RLock() + defer e.mtx.RUnlock() + + err := e.b.Search(ctx, p, sr) + if err != nil { + fmt.Println("error searching wal block", k.BlockID().String(), err) + } + } + + i.blocksMtx.Lock() + defer i.blocksMtx.Unlock() + + // head block + sr.StartWorker() + go searchFunc(i.headBlock, i.searchHeadBlock) + + // completing blocks + for b, e := range i.searchAppendBlocks { + sr.StartWorker() + go searchFunc(b, e) + } +} + +func (i *instance) searchLocalBlocks(ctx context.Context, p search.Pipeline, sr *search.Results) { + i.blocksMtx.Lock() + defer i.blocksMtx.Unlock() + + for b, e := range i.searchCompleteBlocks { + sr.StartWorker() + go func(b *wal.LocalBlock, e *searchLocalBlockEntry) { + defer sr.FinishWorker() + + e.mtx.RLock() + defer e.mtx.RUnlock() + + err := e.b.Search(ctx, p, sr) + if err != nil { + fmt.Println("error searching local block", b.BlockMeta().BlockID.String(), err) + } + }(b, e) + } +} + +func (i *instance) GetSearchTags() []string { + return i.searchTagCache.GetNames() +} + +func (i *instance) GetSearchTagValues(tagName string) []string { + return i.searchTagCache.GetValues(tagName) +} + +func (i *instance) RecordSearchLookupValues(b []byte) { + s := tempofb.SearchEntryFromBytes(b) + i.searchTagCache.SetData(time.Now(), s) +} + +func (i *instance) PurgeExpiredSearchTags(before time.Time) { + i.searchTagCache.PurgeExpired(before) +} diff --git a/modules/ingester/instance_search_test.go b/modules/ingester/instance_search_test.go new file mode 100644 index 
00000000000..f70a4483a93 --- /dev/null +++ b/modules/ingester/instance_search_test.go @@ -0,0 +1,472 @@ +package ingester + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "testing" + "time" + + "github.com/google/uuid" + "github.com/grafana/tempo/modules/overrides" + "github.com/grafana/tempo/pkg/model" + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/util" + "github.com/grafana/tempo/pkg/util/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkEqual(t *testing.T, ids [][]byte, sr *tempopb.SearchResponse) { + for _, meta := range sr.Traces { + parsedTraceID, err := util.HexStringToTraceID(meta.TraceID) + assert.NoError(t, err) + + present := false + for _, id := range ids { + if bytes.Equal(parsedTraceID, id) { + present = true + } + } + assert.True(t, present) + } +} + +func TestInstanceSearch(t *testing.T) { + limits, err := overrides.NewOverrides(overrides.Limits{}) + assert.NoError(t, err, "unexpected error creating limits") + limiter := NewLimiter(limits, &ringCountMock{count: 1}, 1) + + tempDir, err := ioutil.TempDir("/tmp", "") + assert.NoError(t, err, "unexpected error getting temp dir") + defer os.RemoveAll(tempDir) + + ingester, _, _ := defaultIngester(t, tempDir) + i, err := newInstance("fake", limiter, ingester.store, ingester.local) + assert.NoError(t, err, "unexpected error creating new instance") + + numTraces := 500 + searchAnnotatedFractionDenominator := 100 + ids := [][]byte{} + + // add dummy search data + var tagKey = "foo" + var tagValue = "bar" + + for j := 0; j < numTraces; j++ { + id := make([]byte, 16) + rand.Read(id) + + trace := test.MakeTrace(10, id) + model.SortTrace(trace) + traceBytes, err := trace.Marshal() + require.NoError(t, err) + + // annotate just a fraction of traces with search data + var searchData []byte + if j%searchAnnotatedFractionDenominator == 0 { + data := &tempofb.SearchEntryMutable{} + data.TraceID = id + data.AddTag(tagKey, tagValue) + searchData = data.ToBytes() + + // these are the only ids we want to test against + ids = append(ids, id) + } + + // searchData will be nil if not + err = i.PushBytes(context.Background(), id, traceBytes, searchData) + require.NoError(t, err) + + assert.Equal(t, int(i.traceCount.Load()), len(i.traces)) + } + + var req = &tempopb.SearchRequest{ + Tags: map[string]string{}, + } + req.Tags[tagKey] = tagValue + + sr, err := i.Search(context.Background(), req) + assert.NoError(t, err) + assert.Len(t, sr.Traces, numTraces/searchAnnotatedFractionDenominator) + // todo: test that returned results are in sorted time order, create order of id's beforehand + checkEqual(t, ids, sr) + + // Test after appending to WAL + err = i.CutCompleteTraces(0, true) + require.NoError(t, err) + assert.Equal(t, int(i.traceCount.Load()), len(i.traces)) + + sr, err = i.Search(context.Background(), req) + assert.NoError(t, err) + assert.Len(t, sr.Traces, numTraces/searchAnnotatedFractionDenominator) + checkEqual(t, ids, sr) + + // Test after cutting new headblock + blockID, err := i.CutBlockIfReady(0, 0, true) + require.NoError(t, err) + assert.NotEqual(t, blockID, uuid.Nil) + + sr, err = i.Search(context.Background(), req) + assert.NoError(t, err) + assert.Len(t, sr.Traces, numTraces/searchAnnotatedFractionDenominator) + checkEqual(t, ids, sr) + + // Test after completing a block + err = i.CompleteBlock(blockID) + require.NoError(t, err) + + sr, err = i.Search(context.Background(), req) + 
assert.NoError(t, err) + assert.Len(t, sr.Traces, numTraces/searchAnnotatedFractionDenominator) + checkEqual(t, ids, sr) + + err = ingester.stopping(nil) + require.NoError(t, err) + + // create new ingester. this should replay wal! + ingester, _, _ = defaultIngester(t, tempDir) + + i, ok := ingester.getInstanceByID("fake") + assert.True(t, ok) + + sr, err = i.Search(context.Background(), req) + assert.NoError(t, err) + // note: search is experimental and removed on every startup. Verify no search results now + assert.Len(t, sr.Traces, 0) +} + +func TestInstanceSearchNoData(t *testing.T) { + limits, err := overrides.NewOverrides(overrides.Limits{}) + assert.NoError(t, err, "unexpected error creating limits") + limiter := NewLimiter(limits, &ringCountMock{count: 1}, 1) + + tempDir, err := ioutil.TempDir("/tmp", "") + assert.NoError(t, err, "unexpected error getting temp dir") + defer os.RemoveAll(tempDir) + + ingester, _, _ := defaultIngester(t, tempDir) + i, err := newInstance("fake", limiter, ingester.store, ingester.local) + assert.NoError(t, err, "unexpected error creating new instance") + + var req = &tempopb.SearchRequest{ + Tags: map[string]string{}, + } + + sr, err := i.Search(context.Background(), req) + assert.NoError(t, err) + require.Len(t, sr.Traces, 0) +} + +func TestInstanceSearchDoesNotRace(t *testing.T) { + limits, err := overrides.NewOverrides(overrides.Limits{}) + require.NoError(t, err) + limiter := NewLimiter(limits, &ringCountMock{count: 1}, 1) + + ingester, _, _ := defaultIngester(t, t.TempDir()) + i, err := newInstance("fake", limiter, ingester.store, ingester.local) + require.NoError(t, err) + + // add dummy search data + var tagKey = "foo" + var tagValue = "bar" + + var req = &tempopb.SearchRequest{ + Tags: map[string]string{tagKey: tagValue}, + } + + end := make(chan struct{}) + + concurrent := func(f func()) { + for { + select { + case <-end: + return + default: + f() + } + } + } + + go concurrent(func() { + id := make([]byte, 16) + rand.Read(id) + + trace := test.MakeTrace(10, id) + traceBytes, err := trace.Marshal() + require.NoError(t, err) + + searchData := &tempofb.SearchEntryMutable{} + searchData.TraceID = id + searchData.AddTag(tagKey, tagValue) + searchBytes := searchData.ToBytes() + + // searchData will be nil if not + err = i.PushBytes(context.Background(), id, traceBytes, searchBytes) + require.NoError(t, err) + }) + + go concurrent(func() { + err := i.CutCompleteTraces(0, true) + require.NoError(t, err, "error cutting complete traces") + }) + + go concurrent(func() { + _, err := i.FindTraceByID(context.Background(), []byte{0x01}) + assert.NoError(t, err, "error finding trace by id") + }) + + go concurrent(func() { + // Cut wal, complete, delete wal, then flush + blockID, _ := i.CutBlockIfReady(0, 0, true) + if blockID != uuid.Nil { + err := i.CompleteBlock(blockID) + require.NoError(t, err) + err = i.ClearCompletingBlock(blockID) + require.NoError(t, err) + block := i.GetBlockToBeFlushed(blockID) + require.NotNil(t, block) + err = ingester.store.WriteBlock(context.Background(), block) + require.NoError(t, err) + } + }) + + go concurrent(func() { + err = i.ClearFlushedBlocks(0) + require.NoError(t, err) + }) + + go concurrent(func() { + _, err := i.Search(context.Background(), req) + require.NoError(t, err, "error finding trace by id") + }) + + go concurrent(func() { + i.GetSearchTags() + }) + + go concurrent(func() { + i.GetSearchTagValues(tagKey) + }) + + time.Sleep(2000 * time.Millisecond) + close(end) + // Wait for go funcs to quit before + // 
exiting and cleaning up + time.Sleep(2 * time.Second) +} + +func TestWALBlockDeletedDuringSearch(t *testing.T) { + limits, err := overrides.NewOverrides(overrides.Limits{}) + require.NoError(t, err) + limiter := NewLimiter(limits, &ringCountMock{count: 1}, 1) + + ingester, _, _ := defaultIngester(t, t.TempDir()) + i, err := newInstance("fake", limiter, ingester.store, ingester.local) + require.NoError(t, err) + + end := make(chan struct{}) + + concurrent := func(f func()) { + for { + select { + case <-end: + return + default: + f() + } + } + } + + for j := 0; j < 500; j++ { + id := make([]byte, 16) + rand.Read(id) + + trace := test.MakeTrace(10, id) + traceBytes, err := trace.Marshal() + require.NoError(t, err) + + entry := &tempofb.SearchEntryMutable{} + entry.TraceID = id + entry.AddTag("foo", "bar") + searchBytes := entry.ToBytes() + + err = i.PushBytes(context.Background(), id, traceBytes, searchBytes) + require.NoError(t, err) + } + + err = i.CutCompleteTraces(0, true) + require.NoError(t, err) + + blockID, err := i.CutBlockIfReady(0, 0, true) + require.NoError(t, err) + + go concurrent(func() { + _, err := i.Search(context.Background(), &tempopb.SearchRequest{ + Tags: map[string]string{ + // Not present in the data, so it will be an exhaustive + // search + "wuv": "xyz", + }, + }) + require.NoError(t, err) + }) + + // Let search get going + time.Sleep(100 * time.Millisecond) + + err = i.ClearCompletingBlock(blockID) + require.NoError(t, err) + + // Wait for go funcs to quit before + // exiting and cleaning up + close(end) + time.Sleep(2 * time.Second) +} + +func TestInstanceSearchMetrics(t *testing.T) { + + i := defaultInstance(t, t.TempDir()) + + numTraces := uint32(500) + numBytes := uint64(0) + for j := uint32(0); j < numTraces; j++ { + id := make([]byte, 16) + rand.Read(id) + + trace := test.MakeTrace(10, id) + traceBytes, err := trace.Marshal() + require.NoError(t, err) + + data := &tempofb.SearchEntryMutable{} + data.TraceID = id + data.AddTag("foo", "bar") + searchData := data.ToBytes() + + numBytes += uint64(len(searchData)) + + err = i.PushBytes(context.Background(), id, traceBytes, searchData) + require.NoError(t, err) + + assert.Equal(t, int(i.traceCount.Load()), len(i.traces)) + } + + search := func() *tempopb.SearchMetrics { + sr, err := i.Search(context.Background(), &tempopb.SearchRequest{ + Tags: map[string]string{"nomatch": "nomatch"}, + }) + require.NoError(t, err) + return sr.Metrics + } + + // Live traces + m := search() + require.Equal(t, numTraces, m.InspectedTraces) + require.Equal(t, numBytes, m.InspectedBytes) + require.Equal(t, uint32(1), m.InspectedBlocks) // 1 head block + + // Test after appending to WAL + err := i.CutCompleteTraces(0, true) + require.NoError(t, err) + m = search() + require.Equal(t, numTraces, m.InspectedTraces) + require.Equal(t, numBytes, m.InspectedBytes) + require.Equal(t, uint32(1), m.InspectedBlocks) // 1 head block + + // Test after cutting new headblock + blockID, err := i.CutBlockIfReady(0, 0, true) + require.NoError(t, err) + m = search() + require.Equal(t, numTraces, m.InspectedTraces) + require.Equal(t, numBytes, m.InspectedBytes) + require.Equal(t, uint32(2), m.InspectedBlocks) // 1 head block, 1 completing block + + // Test after completing a block + err = i.CompleteBlock(blockID) + require.NoError(t, err) + err = i.ClearCompletingBlock(blockID) + require.NoError(t, err) + // Complete blocks are paged and search data is normalized, therefore smaller than individual wal entries. 
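The smaller byte counts come from the page layout added below in pkg/tempofb: each SearchPage stores a rollup of all distinct tags it contains, so a reader can reject an entire page with a single lookup before touching individual entries. A hedged sketch of that pruning, using only generated names from this diff (the buffer and the tag literals are illustrative, not taken from these tests):

```go
// Sketch: skip an entire page unless its rolled-up tags contain the query.
func searchPage(buf []byte) {
	page := tempofb.GetRootAsSearchPage(buf, 0)
	kv := &tempofb.KeyValues{}
	if !tempofb.ContainsTag(page, kv, []byte("foo"), []byte("bar")) {
		return // no entry in this page can match; skip it wholesale
	}
	entry := &tempofb.SearchEntry{}
	for i := 0; i < page.EntriesLength(); i++ {
		page.Entries(entry, i)
		// per-entry matching and result collection happen here
	}
}
```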
+ m = search() + require.Equal(t, numTraces, m.InspectedTraces) + require.Less(t, m.InspectedBytes, numBytes) + require.Equal(t, uint32(2), m.InspectedBlocks) // 1 head block, 1 complete block +} + +func BenchmarkInstanceSearchUnderLoad(b *testing.B) { + ctx := context.TODO() + //n := 1_000_000 + + i := defaultInstance(b, b.TempDir()) + + end := make(chan struct{}) + + concurrent := func(f func()) { + for { + select { + case <-end: + return + default: + f() + } + } + } + + go concurrent(func() { + id := make([]byte, 16) + rand.Read(id) + + trace := test.MakeTrace(10, id) + traceBytes, err := trace.Marshal() + require.NoError(b, err) + + searchData := &tempofb.SearchEntryMutable{} + searchData.TraceID = id + searchData.AddTag("foo", "bar") + searchBytes := searchData.ToBytes() + + // searchData will be nil if not + err = i.PushBytes(context.Background(), id, traceBytes, searchBytes) + require.NoError(b, err) + }) + + go concurrent(func() { + err := i.CutCompleteTraces(0, true) + require.NoError(b, err, "error cutting complete traces") + }) + + go concurrent(func() { + // Slow this down to prevent "too many open files" error + time.Sleep(10 * time.Millisecond) + _, err := i.CutBlockIfReady(0, 0, true) + require.NoError(b, err) + }) + + b.ResetTimer() + start := time.Now() + bytesInspected := uint64(0) + for j := 0; j < b.N; j++ { + var req = &tempopb.SearchRequest{ + Tags: map[string]string{"nomatch": "nomatch"}, + } + resp, err := i.Search(ctx, req) + require.NoError(b, err) + bytesInspected += resp.Metrics.InspectedBytes + } + elapsed := time.Since(start) + + fmt.Printf("Instance search throughput under load: %v elapsed %.2f MB = %.2f MiB/s throughput \n", + elapsed, + float64(bytesInspected)/(1024*1024), + float64(bytesInspected)/(elapsed.Seconds())/(1024*1024)) + + b.StopTimer() + close(end) + // Wait for go funcs to quit before + // exiting and cleaning up + time.Sleep(1 * time.Second) +} diff --git a/modules/ingester/instance_test.go b/modules/ingester/instance_test.go index c4ba9906e9f..3750238b9e3 100644 --- a/modules/ingester/instance_test.go +++ b/modules/ingester/instance_test.go @@ -9,9 +9,11 @@ import ( "testing" "time" + "github.com/go-kit/kit/log" "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" - "github.com/go-kit/kit/log" "github.com/grafana/tempo/modules/overrides" "github.com/grafana/tempo/modules/storage" "github.com/grafana/tempo/pkg/model" @@ -22,9 +24,6 @@ import ( "github.com/grafana/tempo/tempodb/backend/local" "github.com/grafana/tempo/tempodb/encoding" "github.com/grafana/tempo/tempodb/wal" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type ringCountMock struct { @@ -111,7 +110,7 @@ func TestInstanceFind(t *testing.T) { traceBytes, err := trace.Marshal() require.NoError(t, err) - err = i.PushBytes(context.Background(), id, traceBytes) + err = i.PushBytes(context.Background(), id, traceBytes, nil) require.NoError(t, err) assert.Equal(t, int(i.traceCount.Load()), len(i.traces)) @@ -129,7 +128,7 @@ func TestInstanceFind(t *testing.T) { traceBytes, err := traces[j].Marshal() require.NoError(t, err) - err = i.PushBytes(context.Background(), ids[j], traceBytes) + err = i.PushBytes(context.Background(), ids[j], traceBytes, nil) require.NoError(t, err) } diff --git a/modules/ingester/trace.go b/modules/ingester/trace.go index 5c98201a0cc..5eeedc17bee 100644 --- a/modules/ingester/trace.go +++ b/modules/ingester/trace.go @@ -5,10 +5,23 @@ import ( "encoding/hex" "time" + cortex_util 
"github.com/cortexproject/cortex/pkg/util/log" + "github.com/go-kit/kit/log/level" "github.com/gogo/status" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/grpc/codes" + "github.com/grafana/tempo/modules/overrides" "github.com/grafana/tempo/pkg/tempopb" - "google.golang.org/grpc/codes" +) + +var ( + metricTraceSearchBytesDiscardedTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "ingester_trace_search_bytes_discarded_total", + Help: "The total number of trace search bytes discarded per tenant.", + }, []string{"tenant"}) ) type trace struct { @@ -17,20 +30,26 @@ type trace struct { traceID []byte maxBytes int currentBytes int + + // List of flatbuffers + searchData [][]byte + maxSearchBytes int + currentSearchBytes int } -func newTrace(maxBytes int, traceID []byte) *trace { +func newTrace(traceID []byte, maxBytes int, maxSearchBytes int) *trace { return &trace{ traceBytes: &tempopb.TraceBytes{ Traces: make([][]byte, 0, 10), // 10 for luck }, - lastAppend: time.Now(), - traceID: traceID, - maxBytes: maxBytes, + lastAppend: time.Now(), + traceID: traceID, + maxBytes: maxBytes, + maxSearchBytes: maxSearchBytes, } } -func (t *trace) Push(_ context.Context, trace []byte) error { +func (t *trace) Push(_ context.Context, instanceID string, trace []byte, searchData []byte) error { t.lastAppend = time.Now() if t.maxBytes != 0 { reqSize := len(trace) @@ -43,5 +62,17 @@ func (t *trace) Push(_ context.Context, trace []byte) error { t.traceBytes.Traces = append(t.traceBytes.Traces, trace) + if searchDataSize := len(searchData); searchDataSize > 0 { + // disable limit when set to 0 + if t.maxSearchBytes == 0 || t.currentSearchBytes+searchDataSize <= t.maxSearchBytes { + t.searchData = append(t.searchData, searchData) + t.currentSearchBytes += searchDataSize + } else { + // todo: info level since we are not expecting this limit to be hit, but calibrate accordingly in the future + level.Info(cortex_util.Logger).Log("msg", "size of search data exceeded max search bytes limit", "maxSearchBytes", t.maxSearchBytes, "discardedBytes", searchDataSize) + metricTraceSearchBytesDiscardedTotal.WithLabelValues(instanceID).Add(float64(searchDataSize)) + } + } + return nil } diff --git a/modules/ingester/trace_test.go b/modules/ingester/trace_test.go new file mode 100644 index 00000000000..f8c73cb53ca --- /dev/null +++ b/modules/ingester/trace_test.go @@ -0,0 +1,36 @@ +package ingester + +import ( + "context" + "testing" + + prom_dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" +) + +func TestTraceMaxSearchBytes(t *testing.T) { + tenantID := "fake" + maxSearchBytes := 100 + tr := newTrace(nil, 0, maxSearchBytes) + + getMetric := func() float64 { + m := &prom_dto.Metric{} + err := metricTraceSearchBytesDiscardedTotal.WithLabelValues(tenantID).Write(m) + require.NoError(t, err) + return m.Counter.GetValue() + } + + err := tr.Push(context.TODO(), tenantID, nil, make([]byte, maxSearchBytes)) + require.NoError(t, err) + require.Equal(t, float64(0), getMetric()) + + tooMany := 123 + + err = tr.Push(context.TODO(), tenantID, nil, make([]byte, tooMany)) + require.NoError(t, err) + require.Equal(t, float64(tooMany), getMetric()) + + err = tr.Push(context.TODO(), tenantID, nil, make([]byte, tooMany)) + require.NoError(t, err) + require.Equal(t, float64(tooMany*2), getMetric()) +} diff --git a/modules/overrides/limits.go b/modules/overrides/limits.go index 
9feaf4613ac..dc294f6e779 100644
--- a/modules/overrides/limits.go
+++ b/modules/overrides/limits.go
@@ -32,6 +32,7 @@ type Limits struct {
 	MaxLocalTracesPerUser  int `yaml:"max_traces_per_user" json:"max_traces_per_user"`
 	MaxGlobalTracesPerUser int `yaml:"max_global_traces_per_user" json:"max_global_traces_per_user"`
 	MaxBytesPerTrace       int `yaml:"max_bytes_per_trace" json:"max_bytes_per_trace"`
+	MaxSearchBytesPerTrace int `yaml:"max_search_bytes_per_trace" json:"max_search_bytes_per_trace"`
 
 	// Compactor enforced limits.
 	BlockRetention model.Duration `yaml:"block_retention" json:"block_retention"`
@@ -52,6 +53,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.MaxLocalTracesPerUser, "ingester.max-traces-per-user", 10e3, "Maximum number of active traces per user, per ingester. 0 to disable.")
 	f.IntVar(&l.MaxGlobalTracesPerUser, "ingester.max-global-traces-per-user", 0, "Maximum number of active traces per user, across the cluster. 0 to disable.")
 	f.IntVar(&l.MaxBytesPerTrace, "ingester.max-bytes-per-trace", 50e5, "Maximum size of a trace in bytes. 0 to disable.")
+	f.IntVar(&l.MaxSearchBytesPerTrace, "ingester.max-search-bytes-per-trace", 50e3, "Maximum size of search data per trace in bytes. 0 to disable.")
 	f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides.")
 	_ = l.PerTenantOverridePeriod.Set("10s")
diff --git a/modules/overrides/overrides.go b/modules/overrides/overrides.go
index f60b74d01be..379fe2bb1c3 100644
--- a/modules/overrides/overrides.go
+++ b/modules/overrides/overrides.go
@@ -147,6 +147,11 @@ func (o *Overrides) MaxBytesPerTrace(userID string) int {
 	return o.getOverridesForUser(userID).MaxBytesPerTrace
 }
 
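For context, the enforcement path for this new limit runs through the ingester code earlier in this diff: the override is resolved per tenant when a live trace is first created, and `(*trace).Push` then either keeps the incoming search data or discards it and counts the discarded bytes. A condensed sketch (not the verbatim functions; control flow is simplified):

```go
// Condensed from getOrCreateTrace and (*trace).Push in modules/ingester.
maxSearchBytes := limiter.limits.MaxSearchBytesPerTrace(tenantID)
t := newTrace(traceID, maxBytes, maxSearchBytes)

// Later, on every push that carries search data:
if sz := len(searchData); sz > 0 {
	if t.maxSearchBytes == 0 || t.currentSearchBytes+sz <= t.maxSearchBytes {
		t.searchData = append(t.searchData, searchData) // under the limit: keep it
		t.currentSearchBytes += sz
	} else {
		// over the limit: drop it and account for the discarded bytes
		metricTraceSearchBytesDiscardedTotal.WithLabelValues(tenantID).Add(float64(sz))
	}
}
```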
+// MaxSearchBytesPerTrace returns the maximum size of search data per trace (in bytes) allowed for a user.
+func (o *Overrides) MaxSearchBytesPerTrace(userID string) int {
+	return o.getOverridesForUser(userID).MaxSearchBytesPerTrace
+}
+
 // IngestionRateSpans is the number of spans per second allowed for this tenant
 func (o *Overrides) IngestionRateLimitBytes(userID string) float64 {
 	return float64(o.getOverridesForUser(userID).IngestionRateLimitBytes)
diff --git a/modules/querier/http.go b/modules/querier/http.go
index 044587e9fbf..ba7792a2189 100644
--- a/modules/querier/http.go
+++ b/modules/querier/http.go
@@ -5,11 +5,13 @@ import (
 	"encoding/hex"
 	"fmt"
 	"net/http"
+	"strconv"
 	"time"
 
 	"github.com/golang/protobuf/jsonpb"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
+	"github.com/gorilla/mux"
 	"github.com/grafana/tempo/pkg/tempopb"
 	"github.com/grafana/tempo/pkg/util"
 	"github.com/grafana/tempo/tempodb"
@@ -26,6 +28,10 @@ const (
 	QueryModeIngesters = "ingesters"
 	QueryModeBlocks    = "blocks"
 	QueryModeAll       = "all"
+
+	urlParamMinDuration = "minDuration"
+	urlParamMaxDuration = "maxDuration"
+	urlParamLimit       = "limit"
 )
 
 // TraceByIDHandler is a http.HandlerFunc to retrieve traces
@@ -141,3 +147,123 @@ func validateAndSanitizeRequest(r *http.Request) (string, string, string, error)
 
 	return start, end, queryMode, nil
 }
+
+func (q *Querier) SearchHandler(w http.ResponseWriter, r *http.Request) {
+	// Enforce the query timeout while querying backends
+	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout))
+	defer cancel()
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchHandler")
+	defer span.Finish()
+
+	req := &tempopb.SearchRequest{
+		Tags: map[string]string{},
+	}
+
+	for k, v := range r.URL.Query() {
+		// Skip known values
+		if k == urlParamMinDuration || k == urlParamMaxDuration || k == urlParamLimit {
+			continue
+		}
+
+		if len(v) > 0 && v[0] != "" {
+			req.Tags[k] = v[0]
+		}
+	}
+
+	if s := r.URL.Query().Get(urlParamMinDuration); s != "" {
+		dur, err := time.ParseDuration(s)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		req.MinDurationMs = uint32(dur.Milliseconds())
+	}
+
+	if s := r.URL.Query().Get(urlParamMaxDuration); s != "" {
+		dur, err := time.ParseDuration(s)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		req.MaxDurationMs = uint32(dur.Milliseconds())
+	}
+
+	if s := r.URL.Query().Get(urlParamLimit); s != "" {
+		limit, err := strconv.Atoi(s)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		req.Limit = uint32(limit)
+	}
+
+	resp, err := q.Search(ctx, req)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	marshaller := &jsonpb.Marshaler{}
+	err = marshaller.Marshal(w, resp)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (q *Querier) SearchTagsHandler(w http.ResponseWriter, r *http.Request) {
+	// Enforce the query timeout while querying backends
+	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout))
+	defer cancel()
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagsHandler")
+	defer span.Finish()
+
+	req := &tempopb.SearchTagsRequest{}
+
+	resp, err := q.SearchTags(ctx, req)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	marshaller := &jsonpb.Marshaler{}
+	err = marshaller.Marshal(w, resp)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
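These handlers make search reachable over plain HTTP: any query parameter other than the reserved minDuration, maxDuration, and limit keys is treated as a tag matcher. A hypothetical client call follows; the /api/search route and the host are assumptions, since the mux registration is not part of this diff:

```go
// Hypothetical usage, assuming SearchHandler is mounted at /api/search on a
// querier reachable as "tempo:3200"; neither the route nor the address is
// shown in this diff.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("service.name", "shop-backend") // any non-reserved key is a tag matcher
	params.Set("minDuration", "2s")            // parsed with time.ParseDuration
	params.Set("limit", "10")                  // parsed with strconv.Atoi

	resp, err := http.Get("http://tempo:3200/api/search?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body) // jsonpb-encoded tempopb.SearchResponse
	fmt.Println(string(body))
}
```

Per the proto comment on SearchRequest further down in this diff, tag matching is a case-insensitive partial match.

+func (q *Querier) SearchTagValuesHandler(w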
http.ResponseWriter, r *http.Request) { + // Enforce the query timeout while querying backends + ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout)) + defer cancel() + + span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagValuesHandler") + defer span.Finish() + + vars := mux.Vars(r) + tagName, ok := vars["tagName"] + if !ok { + http.Error(w, "please provide a tagName", http.StatusBadRequest) + return + } + req := &tempopb.SearchTagValuesRequest{ + TagName: tagName, + } + + resp, err := q.SearchTagValues(ctx, req) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + marshaller := &jsonpb.Marshaler{} + err = marshaller.Marshal(w, resp) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} diff --git a/modules/querier/querier.go b/modules/querier/querier.go index e5c5383dbd3..965bfb1595c 100644 --- a/modules/querier/querier.go +++ b/modules/querier/querier.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "sort" "github.com/opentracing/opentracing-go" ot_log "github.com/opentracing/opentracing-go/log" @@ -53,7 +54,7 @@ type Querier struct { type responseFromIngesters struct { addr string - response *tempopb.TraceByIDResponse + response interface{} } // New makes a new Querier. @@ -164,7 +165,7 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque span.LogFields(ot_log.String("msg", "searching ingesters")) // get responses from all ingesters in parallel - responses, err := q.forGivenIngesters(ctx, replicationSet, func(client tempopb.QuerierClient) (*tempopb.TraceByIDResponse, error) { + responses, err := q.forGivenIngesters(ctx, replicationSet, func(client tempopb.QuerierClient) (interface{}, error) { return client.FindTraceByID(opentracing.ContextWithSpan(ctx, span), req) }) if err != nil { @@ -172,7 +173,7 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque } for _, r := range responses { - trace := r.response.Trace + trace := r.response.(*tempopb.TraceByIDResponse).Trace if trace != nil { completeTrace, _, _, spanCount = model.CombineTraceProtos(completeTrace, trace) spanCountTotal += spanCount @@ -231,7 +232,7 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque } // forGivenIngesters runs f, in parallel, for given ingesters -func (q *Querier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(client tempopb.QuerierClient) (*tempopb.TraceByIDResponse, error)) ([]responseFromIngesters, error) { +func (q *Querier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(client tempopb.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { results, err := replicationSet.Do(ctx, q.cfg.ExtraQueryDelay, func(ctx context.Context, ingester *ring.InstanceDesc) (interface{}, error) { client, err := q.pool.GetClientFor(ingester.Addr) if err != nil { @@ -257,6 +258,142 @@ func (q *Querier) forGivenIngesters(ctx context.Context, replicationSet ring.Rep return responses, err } +func (q *Querier) Search(ctx context.Context, req *tempopb.SearchRequest) (*tempopb.SearchResponse, error) { + _, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, errors.Wrap(err, "error extracting org id in Querier.Search") + } + + replicationSet, err := q.ring.GetReplicationSetForOperation(ring.Read) + if err != nil { + return nil, errors.Wrap(err, "error finding ingesters in Querier.Search") + } + + responses, 
err := q.forGivenIngesters(ctx, replicationSet, func(client tempopb.QuerierClient) (interface{}, error) { + return client.Search(ctx, req) + }) + if err != nil { + return nil, errors.Wrap(err, "error querying ingesters in Querier.Search") + } + + return q.postProcessSearchResults(req, responses), nil +} + +func (q *Querier) SearchTags(ctx context.Context, req *tempopb.SearchTagsRequest) (*tempopb.SearchTagsResponse, error) { + _, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, errors.Wrap(err, "error extracting org id in Querier.SearchTags") + } + + replicationSet, err := q.ring.GetReplicationSetForOperation(ring.Read) + if err != nil { + return nil, errors.Wrap(err, "error finding ingesters in Querier.SearchTags") + } + + // Get results from all ingesters + lookupResults, err := q.forGivenIngesters(ctx, replicationSet, func(client tempopb.QuerierClient) (interface{}, error) { + return client.SearchTags(ctx, req) + }) + if err != nil { + return nil, errors.Wrap(err, "error querying ingesters in Querier.SearchTags") + } + + // Collect only unique values + uniqueMap := map[string]struct{}{} + for _, resp := range lookupResults { + for _, res := range resp.response.(*tempopb.SearchTagsResponse).TagNames { + uniqueMap[res] = struct{}{} + } + } + + // Final response (sorted) + resp := &tempopb.SearchTagsResponse{ + TagNames: make([]string, 0, len(uniqueMap)), + } + for k := range uniqueMap { + resp.TagNames = append(resp.TagNames, k) + } + sort.Strings(resp.TagNames) + + return resp, nil +} + +func (q *Querier) SearchTagValues(ctx context.Context, req *tempopb.SearchTagValuesRequest) (*tempopb.SearchTagValuesResponse, error) { + _, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, errors.Wrap(err, "error extracting org id in Querier.SearchTagValues") + } + + replicationSet, err := q.ring.GetReplicationSetForOperation(ring.Read) + if err != nil { + return nil, errors.Wrap(err, "error finding ingesters in Querier.SearchTagValues") + } + + // Get results from all ingesters + lookupResults, err := q.forGivenIngesters(ctx, replicationSet, func(client tempopb.QuerierClient) (interface{}, error) { + return client.SearchTagValues(ctx, req) + }) + if err != nil { + return nil, errors.Wrap(err, "error querying ingesters in Querier.SearchTagValues") + } + + // Collect only unique values + uniqueMap := map[string]struct{}{} + for _, resp := range lookupResults { + for _, res := range resp.response.(*tempopb.SearchTagValuesResponse).TagValues { + uniqueMap[res] = struct{}{} + } + } + + // Final response (sorted) + resp := &tempopb.SearchTagValuesResponse{ + TagValues: make([]string, 0, len(uniqueMap)), + } + for k := range uniqueMap { + resp.TagValues = append(resp.TagValues, k) + } + sort.Strings(resp.TagValues) + + return resp, nil +} + +func (q *Querier) postProcessSearchResults(req *tempopb.SearchRequest, rr []responseFromIngesters) *tempopb.SearchResponse { + response := &tempopb.SearchResponse{ + Metrics: &tempopb.SearchMetrics{}, + } + + traces := map[string]*tempopb.TraceSearchMetadata{} + + for _, r := range rr { + sr := r.response.(*tempopb.SearchResponse) + for _, t := range sr.Traces { + // Just simply take first result for each trace + if _, ok := traces[t.TraceID]; !ok { + traces[t.TraceID] = t + } + } + if sr.Metrics != nil { + response.Metrics.InspectedBytes += sr.Metrics.InspectedBytes + response.Metrics.InspectedTraces += sr.Metrics.InspectedTraces + response.Metrics.InspectedBlocks += sr.Metrics.InspectedBlocks + } + } + + for _, t := range traces { + 
response.Traces = append(response.Traces, t) + } + + // Sort and limit results + sort.Slice(response.Traces, func(i, j int) bool { + return response.Traces[i].StartTimeUnixNano > response.Traces[j].StartTimeUnixNano + }) + if req.Limit != 0 && int(req.Limit) < len(response.Traces) { + response.Traces = response.Traces[:req.Limit] + } + + return response +} + // implements blocklist.JobSharder. Queriers rely on compactors to build the tenant // index which they then consume. func (q *Querier) Owns(_ string) bool { diff --git a/opentelemetry-proto b/opentelemetry-proto index 286810dc20d..a17f202fdae 160000 --- a/opentelemetry-proto +++ b/opentelemetry-proto @@ -1 +1 @@ -Subproject commit 286810dc20d40f6483abf719f2b8de28f543fc78 +Subproject commit a17f202fdae65e828ac29fb663b5bc5b64b13290 diff --git a/operations/jsonnet/microservices/frontend.libsonnet b/operations/jsonnet/microservices/frontend.libsonnet index 50894e8bb36..aa625c85238 100644 --- a/operations/jsonnet/microservices/frontend.libsonnet +++ b/operations/jsonnet/microservices/frontend.libsonnet @@ -42,10 +42,7 @@ ]) + container.withVolumeMounts([ volumeMount.new(tempo_query_config_volume, '/conf'), - ]) + - container.withEnvMap({ - JAEGER_DISABLED: 'true', - }), + ]), tempo_query_frontend_deployment: deployment.new( diff --git a/operations/jsonnet/single-binary/tempo.libsonnet b/operations/jsonnet/single-binary/tempo.libsonnet index 8528b4f5b6a..ebb1c352c6a 100644 --- a/operations/jsonnet/single-binary/tempo.libsonnet +++ b/operations/jsonnet/single-binary/tempo.libsonnet @@ -59,10 +59,7 @@ ]) + container.withVolumeMounts([ volumeMount.new(tempo_query_config_volume, '/conf'), - ]) + - container.withEnvMap({ - JAEGER_DISABLED: 'true', - }), + ]), tempo_statefulset: statefulset.new('tempo', diff --git a/pkg/tempofb/KeyValues.go b/pkg/tempofb/KeyValues.go new file mode 100644 index 00000000000..293c7b8effe --- /dev/null +++ b/pkg/tempofb/KeyValues.go @@ -0,0 +1,75 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package tempofb + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type KeyValues struct { + _tab flatbuffers.Table +} + +func GetRootAsKeyValues(buf []byte, offset flatbuffers.UOffsetT) *KeyValues { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &KeyValues{} + x.Init(buf, n+offset) + return x +} + +func GetSizePrefixedRootAsKeyValues(buf []byte, offset flatbuffers.UOffsetT) *KeyValues { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &KeyValues{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func (rcv *KeyValues) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *KeyValues) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *KeyValues) Key() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *KeyValues) Value(j int) []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) + } + return nil +} + +func (rcv *KeyValues) ValueLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func KeyValuesStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func KeyValuesAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) +} +func KeyValuesAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0) +} +func KeyValuesStartValueVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func KeyValuesEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/tempofb/SearchEntry.go b/pkg/tempofb/SearchEntry.go new file mode 100644 index 00000000000..9b250cc97c2 --- /dev/null +++ b/pkg/tempofb/SearchEntry.go @@ -0,0 +1,108 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package tempofb + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type SearchEntry struct { + _tab flatbuffers.Table +} + +func GetRootAsSearchEntry(buf []byte, offset flatbuffers.UOffsetT) *SearchEntry { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SearchEntry{} + x.Init(buf, n+offset) + return x +} + +func GetSizePrefixedRootAsSearchEntry(buf []byte, offset flatbuffers.UOffsetT) *SearchEntry { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &SearchEntry{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func (rcv *SearchEntry) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SearchEntry) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *SearchEntry) Id() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *SearchEntry) Tags(obj *KeyValues, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SearchEntry) TagsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *SearchEntry) StartTimeUnixNano() uint64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.GetUint64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *SearchEntry) MutateStartTimeUnixNano(n uint64) bool { + return rcv._tab.MutateUint64Slot(8, n) +} + +func (rcv *SearchEntry) EndTimeUnixNano() uint64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetUint64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *SearchEntry) MutateEndTimeUnixNano(n uint64) bool { + return rcv._tab.MutateUint64Slot(10, n) +} + +func SearchEntryStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func SearchEntryAddId(builder *flatbuffers.Builder, id flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(id), 0) +} +func SearchEntryAddTags(builder *flatbuffers.Builder, tags flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(tags), 0) +} +func SearchEntryStartTagsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SearchEntryAddStartTimeUnixNano(builder *flatbuffers.Builder, startTimeUnixNano uint64) { + builder.PrependUint64Slot(2, startTimeUnixNano, 0) +} +func SearchEntryAddEndTimeUnixNano(builder *flatbuffers.Builder, endTimeUnixNano uint64) { + builder.PrependUint64Slot(3, endTimeUnixNano, 0) +} +func SearchEntryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/tempofb/SearchPage.go b/pkg/tempofb/SearchPage.go new file mode 100644 index 00000000000..fceded8ebca --- /dev/null +++ b/pkg/tempofb/SearchPage.go @@ -0,0 +1,93 @@ +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package tempofb + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type SearchPage struct { + _tab flatbuffers.Table +} + +func GetRootAsSearchPage(buf []byte, offset flatbuffers.UOffsetT) *SearchPage { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SearchPage{} + x.Init(buf, n+offset) + return x +} + +func GetSizePrefixedRootAsSearchPage(buf []byte, offset flatbuffers.UOffsetT) *SearchPage { + n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) + x := &SearchPage{} + x.Init(buf, n+offset+flatbuffers.SizeUint32) + return x +} + +func (rcv *SearchPage) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SearchPage) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *SearchPage) Tags(obj *KeyValues, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SearchPage) TagsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *SearchPage) Entries(obj *SearchEntry, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SearchPage) EntriesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func SearchPageStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func SearchPageAddTags(builder *flatbuffers.Builder, tags flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(tags), 0) +} +func SearchPageStartTagsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SearchPageAddEntries(builder *flatbuffers.Builder, entries flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(entries), 0) +} +func SearchPageStartEntriesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SearchPageEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/pkg/tempofb/searchdata_test.go b/pkg/tempofb/searchdata_test.go new file mode 100644 index 00000000000..df538e2eed9 --- /dev/null +++ b/pkg/tempofb/searchdata_test.go @@ -0,0 +1,43 @@ +package tempofb + +import ( + "fmt" + "testing" +) + +func sizeWithCounts(traceCount, tagCount, valueCount int) int { + b := NewSearchPageBuilder() + + for t := 0; t < traceCount; t++ { + sd := &SearchEntryMutable{} + for g := 0; g < tagCount; g++ { + for v := 0; v < valueCount; v++ { + sd.AddTag(fmt.Sprintf("tag%d", g), fmt.Sprintf("value%d", v)) + } + } + + b.AddData(sd) + } + + return len(b.Finish()) +} + +func TestEncodingSize(t *testing.T) { + delta := 1000 + + batchBaseLine := sizeWithCounts(0, 0, 0) + + traceBaseLine := sizeWithCounts(1, 0, 0) + traceLongTerm := sizeWithCounts(delta, 0, 0) + + tagValueBaseLine := sizeWithCounts(1, 1, 1) + tagValueLongTermTags := sizeWithCounts(1, delta, 1) + tagValueLongTermValues := sizeWithCounts(1, 1, delta) + + fmt.Printf("Data sizes:\n") + fmt.Printf("- Batch: %d bytes\n", batchBaseLine) + fmt.Printf("- Trace: %d bytes first, %.1f bytes after\n", 
traceBaseLine-batchBaseLine, float32(traceLongTerm-traceBaseLine)/float32(delta))
+	fmt.Printf("- TagValue: %d bytes first\n", tagValueBaseLine-traceBaseLine)
+	fmt.Printf(" - Tag: %.1f bytes after\n", float32(tagValueLongTermTags-tagValueBaseLine)/float32(delta))
+	fmt.Printf(" - Value: %.1f bytes after\n", float32(tagValueLongTermValues-tagValueBaseLine)/float32(delta))
+}
diff --git a/pkg/tempofb/searchdata_util.go b/pkg/tempofb/searchdata_util.go
new file mode 100644
index 00000000000..f03667c547f
--- /dev/null
+++ b/pkg/tempofb/searchdata_util.go
@@ -0,0 +1,292 @@
+package tempofb
+
+import (
+	"bytes"
+	"sort"
+	"strings"
+
+	flatbuffers "github.com/google/flatbuffers/go"
+	"github.com/grafana/tempo/tempodb/encoding/common"
+)
+
+// TagContainer is anything with KeyValues (tags). This is implemented by both
+// SearchPage and SearchEntry.
+type TagContainer interface {
+	Tags(obj *KeyValues, j int) bool
+	TagsLength() int
+}
+
+var _ TagContainer = (*SearchPage)(nil)
+var _ TagContainer = (*SearchEntry)(nil)
+
+type SearchDataMap map[string][]string
+
+func (s SearchDataMap) Add(k, v string) {
+	vs, ok := s[k]
+	if !ok {
+		// First entry for key
+		s[k] = []string{v}
+		return
+	}
+
+	// Key already present, now check for value
+	for i := range vs {
+		if vs[i] == v {
+			// Already present, nothing to do
+			return
+		}
+	}
+
+	// Not found, append
+	s[k] = append(vs, v)
+}
+
+func (s SearchDataMap) WriteToBuilder(b *flatbuffers.Builder) flatbuffers.UOffsetT {
+	offsets := make([]flatbuffers.UOffsetT, 0, len(s))
+
+	// Sort keys
+	keys := make([]string, 0, len(s))
+	for k := range s {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		// Skip empty keys
+		if len(s[k]) <= 0 {
+			continue
+		}
+
+		ko := b.CreateSharedString(strings.ToLower(k))
+
+		// Sort values
+		v := s[k]
+		sort.Strings(v)
+
+		valueStrings := make([]flatbuffers.UOffsetT, len(v))
+		for i := range v {
+			valueStrings[i] = b.CreateSharedString(strings.ToLower(v[i]))
+		}
+
+		KeyValuesStartValueVector(b, len(valueStrings))
+		for _, vs := range valueStrings {
+			b.PrependUOffsetT(vs)
+		}
+		valueVector := b.EndVector(len(valueStrings))
+
+		KeyValuesStart(b)
+		KeyValuesAddKey(b, ko)
+		KeyValuesAddValue(b, valueVector)
+		offsets = append(offsets, KeyValuesEnd(b))
+	}
+
+	SearchEntryStartTagsVector(b, len(offsets))
+	for _, kvo := range offsets {
+		b.PrependUOffsetT(kvo)
+	}
+	keyValueVector := b.EndVector(len(offsets))
+	return keyValueVector
+}
+
+// SearchEntryMutable is a mutable form of the flatbuffer-compiled SearchEntry struct to make building and transporting easier.
+type SearchEntryMutable struct {
+	TraceID           common.ID
+	Tags              SearchDataMap
+	StartTimeUnixNano uint64
+	EndTimeUnixNano   uint64
+}
+
+// AddTag adds the unique tag name and value to the search data. No effect if the pair is already present.
+func (s *SearchEntryMutable) AddTag(k string, v string) {
+	if s.Tags == nil {
+		s.Tags = SearchDataMap{}
+	}
+	s.Tags.Add(k, v)
+}
+
+// SetStartTimeUnixNano records the earliest of all timestamps passed to this function.
+func (s *SearchEntryMutable) SetStartTimeUnixNano(t uint64) {
+	if t > 0 && (s.StartTimeUnixNano == 0 || s.StartTimeUnixNano > t) {
+		s.StartTimeUnixNano = t
+	}
+}
+
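Taken together, SearchDataMap and SearchEntryMutable give the write path a simple API. A small round-trip sketch through the generated accessors (the id and tag literals are illustrative; values come back lowercased because WriteToBuilder lowercases keys and values on write):

```go
// Build search data for one trace, serialize it, then read a tag back.
// Assumes id is a 16-byte trace ID obtained elsewhere.
entry := &tempofb.SearchEntryMutable{}
entry.TraceID = id
entry.AddTag("service.name", "shop-backend")
entry.SetStartTimeUnixNano(uint64(time.Now().Add(-time.Second).UnixNano()))
entry.SetEndTimeUnixNano(uint64(time.Now().UnixNano()))
buf := entry.ToBytes()

parsed := tempofb.SearchEntryFromBytes(buf)
fmt.Println(parsed.Get("service.name"))                            // "shop-backend"
fmt.Println(parsed.EndTimeUnixNano() - parsed.StartTimeUnixNano()) // ~1s in nanos
```

+// SetEndTimeUnixNano records the latest of all timestamps passed to this function.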
+func (s *SearchEntryMutable) SetEndTimeUnixNano(t uint64) { + if t > 0 && t > s.EndTimeUnixNano { + s.EndTimeUnixNano = t + } +} + +func (s *SearchEntryMutable) ToBytes() []byte { + b := flatbuffers.NewBuilder(2048) + offset := s.WriteToBuilder(b) + b.Finish(offset) + return b.FinishedBytes() +} + +func (s *SearchEntryMutable) WriteToBuilder(b *flatbuffers.Builder) flatbuffers.UOffsetT { + + idOffset := b.CreateByteString(s.TraceID) + + tagOffset := s.Tags.WriteToBuilder(b) + + SearchEntryStart(b) + SearchEntryAddId(b, idOffset) + SearchEntryAddStartTimeUnixNano(b, s.StartTimeUnixNano) + SearchEntryAddEndTimeUnixNano(b, s.EndTimeUnixNano) + SearchEntryAddTags(b, tagOffset) + return SearchEntryEnd(b) +} + +type SearchPageBuilder struct { + builder *flatbuffers.Builder + allTags SearchDataMap + pageEntries []flatbuffers.UOffsetT +} + +func NewSearchPageBuilder() *SearchPageBuilder { + return &SearchPageBuilder{ + builder: flatbuffers.NewBuilder(1024), + allTags: SearchDataMap{}, + } +} + +func (b *SearchPageBuilder) AddData(data *SearchEntryMutable) int { + for k, vv := range data.Tags { + for _, v := range vv { + b.allTags.Add(k, v) + } + } + + oldOffset := b.builder.Offset() + offset := data.WriteToBuilder(b.builder) + b.pageEntries = append(b.pageEntries, offset) + + // bytes written + return int(offset - oldOffset) +} + +func (b *SearchPageBuilder) Finish() []byte { + // At this point all individual entries have been written + // to the fb builder. Now we need to wrap them up in the final + // batch object. + + // Create vector + SearchPageStartEntriesVector(b.builder, len(b.pageEntries)) + for _, entry := range b.pageEntries { + b.builder.PrependUOffsetT(entry) + } + entryVector := b.builder.EndVector(len(b.pageEntries)) + + // Create batch-level tags + tagOffset := b.allTags.WriteToBuilder(b.builder) + + // Write final batch object + SearchPageStart(b.builder) + SearchPageAddEntries(b.builder, entryVector) + SearchPageAddTags(b.builder, tagOffset) + batch := SearchPageEnd(b.builder) + b.builder.Finish(batch) + buf := b.builder.FinishedBytes() + + return buf +} + +func (b *SearchPageBuilder) Reset() { + b.builder.Reset() + b.pageEntries = b.pageEntries[:0] + b.allTags = SearchDataMap{} +} + +// Get searches the entry and returns the first value found for the given key. +func (s *SearchEntry) Get(k string) string { + kv := &KeyValues{} + kb := bytes.ToLower([]byte(k)) + + // TODO - Use binary search since keys/values are sorted + for i := 0; i < s.TagsLength(); i++ { + s.Tags(kv, i) + if bytes.Equal(kv.Key(), kb) { + return string(kv.Value(0)) + } + } + + return "" +} + +// Contains returns true if the key and value are found in the search data. +// Buffer KeyValue object can be passed to reduce allocations. Key and value must be +// already converted to byte slices which match the nature of the flatbuffer data +// which reduces allocations even further. +func (s *SearchEntry) Contains(kv *KeyValues, k []byte, v []byte) bool { + + matched := -1 + + // Binary search for keys. Flatbuffers are written backwards so + // keys are descending (the comparison is reversed). + // TODO - We only want exact matches, sort.Search has to make an + // extra comparison. 
We should fork it to make use of the full + // tri-state response from bytes.Compare + sort.Search(s.TagsLength(), func(i int) bool { + s.Tags(kv, i) + comparison := bytes.Compare(k, kv.Key()) + if comparison == 0 { + matched = i + // TODO it'd be great to exit here and retain the data in kv buffer + } + return comparison >= 0 + }) + + if matched >= 0 { + s.Tags(kv, matched) + + // Linear search for matching values + l := kv.ValueLength() + for j := 0; j < l; j++ { + if bytes.Contains(kv.Value(j), v) { + return true + } + } + } + + return false +} + +func SearchEntryFromBytes(b []byte) *SearchEntry { + return GetRootAsSearchEntry(b, 0) +} + +func ContainsTag(s TagContainer, kv *KeyValues, k []byte, v []byte) bool { + + matched := -1 + + // Binary search for keys. Flatbuffers are written backwards so + // keys are descending (the comparison is reversed). + // TODO - We only want exact matches, sort.Search has to make an + // extra comparison. We should fork it to make use of the full + // tri-state response from bytes.Compare + sort.Search(s.TagsLength(), func(i int) bool { + s.Tags(kv, i) + comparison := bytes.Compare(k, kv.Key()) + if comparison == 0 { + matched = i + // TODO it'd be great to exit here and retain the data in kv buffer + } + return comparison >= 0 + }) + + if matched >= 0 { + s.Tags(kv, matched) + + // Linear search for matching values + l := kv.ValueLength() + for j := 0; j < l; j++ { + if bytes.Contains(kv.Value(j), v) { + return true + } + } + } + + return false +} diff --git a/pkg/tempofb/tempo.fbs b/pkg/tempofb/tempo.fbs new file mode 100644 index 00000000000..8a9b74e060f --- /dev/null +++ b/pkg/tempofb/tempo.fbs @@ -0,0 +1,25 @@ +namespace tempofb; + +table KeyValues { + key: string; + value: [string]; +} + +// SearchEntry is the search data for a trace. +table SearchEntry { + id : string; // Converted to []byte + tags : [KeyValues]; + start_time_unix_nano: uint64; + end_time_unix_nano: uint64; +} + +// SearchPage is a contiguous block of flatbuffer data +// that contains traces and header data. +table SearchPage { + // This is a rollup of all distinct tags/values in the + // page for quick elimination. + tags : [KeyValues]; + + // Trace entries + entries : [SearchEntry]; +} \ No newline at end of file diff --git a/pkg/tempopb/common/v1/common.pb.go b/pkg/tempopb/common/v1/common.pb.go index 74e00f1cfdf..86446b2d7d3 100644 --- a/pkg/tempopb/common/v1/common.pb.go +++ b/pkg/tempopb/common/v1/common.pb.go @@ -320,6 +320,8 @@ func (m *KeyValue) GetValue() *AnyValue { // StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version // of KeyValue that only supports string values. +// +// Deprecated: Do not use. 
type StringKeyValue struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -439,35 +441,35 @@ func init() { func init() { proto.RegisterFile("common/v1/common.proto", fileDescriptor_92d5df4519b8f2e3) } var fileDescriptor_92d5df4519b8f2e3 = []byte{ - // 443 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xc1, 0x6e, 0xd4, 0x30, - 0x10, 0x8d, 0xbb, 0xed, 0x76, 0x33, 0x59, 0x21, 0xb0, 0x50, 0xb5, 0x12, 0x6a, 0x1a, 0x2d, 0x97, - 0x9c, 0x12, 0x6d, 0x7b, 0xe1, 0xc8, 0x6e, 0x11, 0x0a, 0xa2, 0x12, 0x55, 0x90, 0x38, 0x70, 0x41, - 0x0e, 0x98, 0x60, 0x6d, 0x62, 0x47, 0x8e, 0x13, 0x29, 0x7f, 0xc1, 0x4f, 0xf0, 0x2f, 0x1c, 0x7b, - 0xe4, 0x88, 0x76, 0x7f, 0x04, 0xc5, 0x76, 0xd2, 0x4a, 0x54, 0xd0, 0xdb, 0xf8, 0xe5, 0xbd, 0x37, - 0xcf, 0x33, 0x0e, 0x9c, 0x7c, 0x16, 0x65, 0x29, 0x78, 0xdc, 0xae, 0x62, 0x53, 0x45, 0x95, 0x14, - 0x4a, 0xe0, 0x27, 0x8a, 0x96, 0x95, 0xa8, 0xb2, 0xc8, 0xa2, 0xed, 0x6a, 0xf9, 0xe3, 0x00, 0x66, - 0x6b, 0xde, 0x7d, 0x20, 0x45, 0x43, 0xf1, 0x73, 0x98, 0xd7, 0x4a, 0x32, 0x9e, 0x7f, 0x6a, 0xfb, - 0xf3, 0x02, 0x05, 0x28, 0x74, 0x13, 0x27, 0xf5, 0x0c, 0x6a, 0x48, 0x67, 0x00, 0x99, 0x10, 0x85, - 0xa5, 0x1c, 0x04, 0x28, 0x9c, 0x25, 0x4e, 0xea, 0xf6, 0x98, 0x21, 0x9c, 0x82, 0xcb, 0xb8, 0xb2, - 0xdf, 0x27, 0x01, 0x0a, 0x27, 0x89, 0x93, 0xce, 0x18, 0x57, 0x63, 0x93, 0x2f, 0xa2, 0xc9, 0x0a, - 0x6a, 0x19, 0x87, 0x01, 0x0a, 0x51, 0xdf, 0xc4, 0xa0, 0x86, 0xf4, 0x12, 0x3c, 0x22, 0x25, 0xe9, - 0x2c, 0xe7, 0x28, 0x40, 0xa1, 0x77, 0x7e, 0x1a, 0xfd, 0x95, 0x3f, 0x5a, 0xf7, 0x2c, 0xad, 0x49, - 0x9c, 0x14, 0xc8, 0x78, 0xc2, 0xaf, 0x60, 0xbe, 0x6d, 0x0b, 0x56, 0x0f, 0x41, 0xa6, 0xda, 0xe2, - 0xec, 0x1e, 0x8b, 0xb7, 0xd4, 0x48, 0xae, 0x58, 0xad, 0xfa, 0x1c, 0x46, 0xa6, 0xa1, 0xcd, 0x31, - 0x1c, 0x69, 0xf9, 0x72, 0x0d, 0x70, 0xdb, 0x0a, 0x5f, 0xc0, 0x54, 0xc3, 0xf5, 0x02, 0x05, 0x93, - 0xd0, 0x3b, 0x7f, 0x76, 0x5f, 0x32, 0x3b, 0xd5, 0xd4, 0x52, 0x97, 0x97, 0x30, 0xbf, 0xdb, 0xea, - 0x41, 0x26, 0x83, 0x60, 0x34, 0x79, 0x07, 0xb3, 0x01, 0xc3, 0x8f, 0x61, 0xb2, 0xa5, 0x9d, 0xd9, - 0x52, 0xda, 0x97, 0x78, 0x65, 0xe3, 0xea, 0xb5, 0xfc, 0x27, 0x96, 0xbd, 0xd8, 0x0b, 0x78, 0xf4, - 0x5e, 0x6f, 0xf7, 0x1f, 0xb6, 0x4f, 0xef, 0xda, 0xba, 0x83, 0xf2, 0x35, 0x9c, 0xbc, 0xe1, 0xb5, - 0x92, 0x4d, 0x49, 0xb9, 0x22, 0x8a, 0x09, 0x7e, 0xc5, 0x32, 0x49, 0x64, 0x87, 0x31, 0x1c, 0x72, - 0x52, 0xda, 0xf7, 0x93, 0xea, 0x1a, 0x2f, 0xe0, 0xb8, 0xa5, 0xb2, 0x66, 0x82, 0x5b, 0x97, 0xe1, - 0xb8, 0xe9, 0x7e, 0xee, 0x7c, 0x74, 0xb3, 0xf3, 0xd1, 0xef, 0x9d, 0x8f, 0xbe, 0xef, 0x7d, 0xe7, - 0x66, 0xef, 0x3b, 0xbf, 0xf6, 0xbe, 0x03, 0x01, 0x13, 0x91, 0xa8, 0x28, 0x57, 0xb4, 0xa0, 0x25, - 0x55, 0xb2, 0x33, 0xef, 0xf8, 0xf6, 0x36, 0x1b, 0xef, 0x52, 0x97, 0xd7, 0x3d, 0x7c, 0x8d, 0x3e, - 0x46, 0x39, 0x53, 0xdf, 0x1a, 0x7d, 0xdd, 0x38, 0x97, 0xe4, 0x2b, 0xe1, 0x24, 0xd6, 0x23, 0x88, - 0xab, 0x6d, 0x1e, 0xdb, 0x61, 0xc4, 0xe3, 0xdf, 0x91, 0x4d, 0xb5, 0xdf, 0xc5, 0x9f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xe2, 0xbc, 0x4d, 0xd3, 0x31, 0x03, 0x00, 0x00, + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xce, 0xb4, 0xbb, 0xdd, 0xf6, 0xa5, 0x88, 0x0e, 0xb2, 0x14, 0x64, 0xb3, 0xa1, 0x5e, 0x72, + 0x4a, 0xe8, 0xee, 0x4d, 0x3c, 0xd8, 0xae, 0x48, 0xc4, 0x05, 0x97, 0x08, 0x1e, 0xbc, 0xc8, 0x44, + 0xc7, 0x38, 0x34, 0x99, 0x09, 0x93, 0x49, 0x20, 0xff, 0xc2, 0x3f, 0xe1, 0x7f, 0xf1, 0xb8, 
0x47, + 0x8f, 0xd2, 0xfe, 0x11, 0xc9, 0xcc, 0x24, 0xbb, 0x60, 0x51, 0x6f, 0x6f, 0xbe, 0x7c, 0xdf, 0xf7, + 0xbe, 0x79, 0x6f, 0x02, 0xa7, 0x9f, 0x44, 0x51, 0x08, 0x1e, 0x35, 0xab, 0xc8, 0x54, 0x61, 0x29, + 0x85, 0x12, 0xf8, 0x91, 0xa2, 0x45, 0x29, 0xca, 0x34, 0xb4, 0x68, 0xb3, 0x5a, 0x7e, 0x1f, 0xc1, + 0x74, 0xcd, 0xdb, 0xf7, 0x24, 0xaf, 0x29, 0x7e, 0x0a, 0xf3, 0x4a, 0x49, 0xc6, 0xb3, 0x8f, 0x4d, + 0x77, 0x5e, 0x20, 0x1f, 0x05, 0xb3, 0xd8, 0x49, 0x5c, 0x83, 0x1a, 0xd2, 0x39, 0x40, 0x2a, 0x44, + 0x6e, 0x29, 0x23, 0x1f, 0x05, 0xd3, 0xd8, 0x49, 0x66, 0x1d, 0x66, 0x08, 0x67, 0x30, 0x63, 0x5c, + 0xd9, 0xef, 0x63, 0x1f, 0x05, 0xe3, 0xd8, 0x49, 0xa6, 0x8c, 0xab, 0xa1, 0xc9, 0x67, 0x51, 0xa7, + 0x39, 0xb5, 0x8c, 0x23, 0x1f, 0x05, 0xa8, 0x6b, 0x62, 0x50, 0x43, 0x7a, 0x01, 0x2e, 0x91, 0x92, + 0xb4, 0x96, 0x73, 0xec, 0xa3, 0xc0, 0xbd, 0x38, 0x0b, 0xff, 0xc8, 0x1f, 0xae, 0x3b, 0x96, 0xd6, + 0xc4, 0x4e, 0x02, 0x64, 0x38, 0xe1, 0x97, 0x30, 0xdf, 0x36, 0x39, 0xab, 0xfa, 0x20, 0x13, 0x6d, + 0x71, 0x7e, 0xc0, 0xe2, 0x0d, 0x35, 0x92, 0x6b, 0x56, 0xa9, 0x2e, 0x87, 0x91, 0x69, 0x68, 0x73, + 0x02, 0xc7, 0x5a, 0xbe, 0x5c, 0x03, 0xdc, 0xb5, 0xc2, 0x97, 0x30, 0xd1, 0x70, 0xb5, 0x40, 0xfe, + 0x38, 0x70, 0x2f, 0x9e, 0x1c, 0x4a, 0x66, 0xa7, 0x9a, 0x58, 0xea, 0xf2, 0x0a, 0xe6, 0xf7, 0x5b, + 0xfd, 0x97, 0x49, 0x2f, 0x18, 0x4c, 0xde, 0xc2, 0xb4, 0xc7, 0xf0, 0x43, 0x18, 0x6f, 0x69, 0x6b, + 0xb6, 0x94, 0x74, 0x25, 0x5e, 0xd9, 0xb8, 0x7a, 0x2d, 0xff, 0x88, 0x65, 0x2f, 0xf6, 0x1c, 0x1e, + 0xbc, 0xd3, 0xdb, 0xfd, 0x8b, 0xed, 0xe3, 0xfb, 0xb6, 0x33, 0xab, 0x7c, 0x36, 0x5a, 0xa0, 0xe5, + 0x2b, 0x38, 0x7d, 0xcd, 0x2b, 0x25, 0xeb, 0x82, 0x72, 0x45, 0x14, 0x13, 0xfc, 0x9a, 0xa5, 0x92, + 0xc8, 0x16, 0x63, 0x38, 0xe2, 0xa4, 0xb0, 0x6f, 0x28, 0xd1, 0x35, 0x5e, 0xc0, 0x49, 0x43, 0x65, + 0xc5, 0x04, 0xb7, 0x4e, 0xfd, 0x71, 0xd3, 0xfe, 0xd8, 0x79, 0xe8, 0x76, 0xe7, 0xa1, 0x5f, 0x3b, + 0x0f, 0x7d, 0xdb, 0x7b, 0xce, 0xed, 0xde, 0x73, 0x7e, 0xee, 0x3d, 0x07, 0x7c, 0x26, 0x42, 0x51, + 0x52, 0xae, 0x68, 0x4e, 0x0b, 0xaa, 0x64, 0x6b, 0xde, 0xf2, 0xdd, 0x8d, 0x36, 0xee, 0x95, 0x2e, + 0x6f, 0x3a, 0xf8, 0x06, 0x7d, 0x08, 0x33, 0xa6, 0xbe, 0xd6, 0xfa, 0xca, 0x51, 0x26, 0xc9, 0x17, + 0xc2, 0x49, 0xa4, 0xc7, 0x10, 0x95, 0xdb, 0x2c, 0xb2, 0x03, 0x89, 0x86, 0x3f, 0x24, 0x9d, 0x68, + 0xbf, 0xcb, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x2c, 0x05, 0xfb, 0x35, 0x03, 0x00, 0x00, } func (m *AnyValue) Marshal() (dAtA []byte, err error) { diff --git a/pkg/tempopb/tempo.pb.go b/pkg/tempopb/tempo.pb.go index f6d9beb0595..5f5bea0ac0a 100644 --- a/pkg/tempopb/tempo.pb.go +++ b/pkg/tempopb/tempo.pb.go @@ -141,6 +141,431 @@ func (m *TraceByIDResponse) GetTrace() *Trace { return nil } +type SearchRequest struct { + // case insensitive partial match + Tags map[string]string `protobuf:"bytes,1,rep,name=Tags,proto3" json:"Tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + MinDurationMs uint32 `protobuf:"varint,2,opt,name=MinDurationMs,proto3" json:"MinDurationMs,omitempty"` + MaxDurationMs uint32 `protobuf:"varint,3,opt,name=MaxDurationMs,proto3" json:"MaxDurationMs,omitempty"` + Limit uint32 `protobuf:"varint,4,opt,name=Limit,proto3" json:"Limit,omitempty"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{2} +} +func (m *SearchRequest) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchRequest.Merge(m, src) +} +func (m *SearchRequest) XXX_Size() int { + return m.Size() +} +func (m *SearchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SearchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchRequest proto.InternalMessageInfo + +func (m *SearchRequest) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *SearchRequest) GetMinDurationMs() uint32 { + if m != nil { + return m.MinDurationMs + } + return 0 +} + +func (m *SearchRequest) GetMaxDurationMs() uint32 { + if m != nil { + return m.MaxDurationMs + } + return 0 +} + +func (m *SearchRequest) GetLimit() uint32 { + if m != nil { + return m.Limit + } + return 0 +} + +type SearchResponse struct { + Traces []*TraceSearchMetadata `protobuf:"bytes,1,rep,name=traces,proto3" json:"traces,omitempty"` + Metrics *SearchMetrics `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{3} +} +func (m *SearchResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchResponse.Merge(m, src) +} +func (m *SearchResponse) XXX_Size() int { + return m.Size() +} +func (m *SearchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SearchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchResponse proto.InternalMessageInfo + +func (m *SearchResponse) GetTraces() []*TraceSearchMetadata { + if m != nil { + return m.Traces + } + return nil +} + +func (m *SearchResponse) GetMetrics() *SearchMetrics { + if m != nil { + return m.Metrics + } + return nil +} + +type TraceSearchMetadata struct { + TraceID string `protobuf:"bytes,1,opt,name=traceID,proto3" json:"traceID,omitempty"` + RootServiceName string `protobuf:"bytes,2,opt,name=rootServiceName,proto3" json:"rootServiceName,omitempty"` + RootTraceName string `protobuf:"bytes,3,opt,name=rootTraceName,proto3" json:"rootTraceName,omitempty"` + StartTimeUnixNano uint64 `protobuf:"varint,4,opt,name=startTimeUnixNano,proto3" json:"startTimeUnixNano,omitempty"` + DurationMs uint32 `protobuf:"varint,5,opt,name=durationMs,proto3" json:"durationMs,omitempty"` +} + +func (m *TraceSearchMetadata) Reset() { *m = TraceSearchMetadata{} } +func (m *TraceSearchMetadata) String() string { return proto.CompactTextString(m) } +func (*TraceSearchMetadata) ProtoMessage() {} +func (*TraceSearchMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{4} +} +func (m *TraceSearchMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*TraceSearchMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TraceSearchMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TraceSearchMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceSearchMetadata.Merge(m, src) +} +func (m *TraceSearchMetadata) XXX_Size() int { + return m.Size() +} +func (m *TraceSearchMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_TraceSearchMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceSearchMetadata proto.InternalMessageInfo + +func (m *TraceSearchMetadata) GetTraceID() string { + if m != nil { + return m.TraceID + } + return "" +} + +func (m *TraceSearchMetadata) GetRootServiceName() string { + if m != nil { + return m.RootServiceName + } + return "" +} + +func (m *TraceSearchMetadata) GetRootTraceName() string { + if m != nil { + return m.RootTraceName + } + return "" +} + +func (m *TraceSearchMetadata) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *TraceSearchMetadata) GetDurationMs() uint32 { + if m != nil { + return m.DurationMs + } + return 0 +} + +type SearchMetrics struct { + InspectedTraces uint32 `protobuf:"varint,1,opt,name=inspectedTraces,proto3" json:"inspectedTraces,omitempty"` + InspectedBytes uint64 `protobuf:"varint,2,opt,name=inspectedBytes,proto3" json:"inspectedBytes,omitempty"` + InspectedBlocks uint32 `protobuf:"varint,3,opt,name=inspectedBlocks,proto3" json:"inspectedBlocks,omitempty"` +} + +func (m *SearchMetrics) Reset() { *m = SearchMetrics{} } +func (m *SearchMetrics) String() string { return proto.CompactTextString(m) } +func (*SearchMetrics) ProtoMessage() {} +func (*SearchMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{5} +} +func (m *SearchMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SearchMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SearchMetrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchMetrics.Merge(m, src) +} +func (m *SearchMetrics) XXX_Size() int { + return m.Size() +} +func (m *SearchMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_SearchMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchMetrics proto.InternalMessageInfo + +func (m *SearchMetrics) GetInspectedTraces() uint32 { + if m != nil { + return m.InspectedTraces + } + return 0 +} + +func (m *SearchMetrics) GetInspectedBytes() uint64 { + if m != nil { + return m.InspectedBytes + } + return 0 +} + +func (m *SearchMetrics) GetInspectedBlocks() uint32 { + if m != nil { + return m.InspectedBlocks + } + return 0 +} + +type SearchTagsRequest struct { +} + +func (m *SearchTagsRequest) Reset() { *m = SearchTagsRequest{} } +func (m *SearchTagsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchTagsRequest) ProtoMessage() {} +func (*SearchTagsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{6} +} +func (m *SearchTagsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SearchTagsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_SearchTagsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchTagsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchTagsRequest.Merge(m, src) +} +func (m *SearchTagsRequest) XXX_Size() int { + return m.Size() +} +func (m *SearchTagsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SearchTagsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchTagsRequest proto.InternalMessageInfo + +type SearchTagsResponse struct { + TagNames []string `protobuf:"bytes,1,rep,name=tagNames,proto3" json:"tagNames,omitempty"` +} + +func (m *SearchTagsResponse) Reset() { *m = SearchTagsResponse{} } +func (m *SearchTagsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchTagsResponse) ProtoMessage() {} +func (*SearchTagsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{7} +} +func (m *SearchTagsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SearchTagsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SearchTagsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchTagsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchTagsResponse.Merge(m, src) +} +func (m *SearchTagsResponse) XXX_Size() int { + return m.Size() +} +func (m *SearchTagsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SearchTagsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchTagsResponse proto.InternalMessageInfo + +func (m *SearchTagsResponse) GetTagNames() []string { + if m != nil { + return m.TagNames + } + return nil +} + +type SearchTagValuesRequest struct { + TagName string `protobuf:"bytes,1,opt,name=tagName,proto3" json:"tagName,omitempty"` +} + +func (m *SearchTagValuesRequest) Reset() { *m = SearchTagValuesRequest{} } +func (m *SearchTagValuesRequest) String() string { return proto.CompactTextString(m) } +func (*SearchTagValuesRequest) ProtoMessage() {} +func (*SearchTagValuesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{8} +} +func (m *SearchTagValuesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SearchTagValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SearchTagValuesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchTagValuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchTagValuesRequest.Merge(m, src) +} +func (m *SearchTagValuesRequest) XXX_Size() int { + return m.Size() +} +func (m *SearchTagValuesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SearchTagValuesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchTagValuesRequest proto.InternalMessageInfo + +func (m *SearchTagValuesRequest) GetTagName() string { + if m != nil { + return m.TagName + } + return "" +} + +type SearchTagValuesResponse struct { + TagValues []string `protobuf:"bytes,1,rep,name=tagValues,proto3" json:"tagValues,omitempty"` +} + +func (m *SearchTagValuesResponse) Reset() { *m = SearchTagValuesResponse{} } +func (m *SearchTagValuesResponse) String() string { return 
proto.CompactTextString(m) } +func (*SearchTagValuesResponse) ProtoMessage() {} +func (*SearchTagValuesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{9} +} +func (m *SearchTagValuesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SearchTagValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SearchTagValuesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SearchTagValuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchTagValuesResponse.Merge(m, src) +} +func (m *SearchTagValuesResponse) XXX_Size() int { + return m.Size() +} +func (m *SearchTagValuesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SearchTagValuesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchTagValuesResponse proto.InternalMessageInfo + +func (m *SearchTagValuesResponse) GetTagValues() []string { + if m != nil { + return m.TagValues + } + return nil +} + type Trace struct { Batches []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=batches,proto3" json:"batches,omitempty"` } @@ -149,7 +574,7 @@ func (m *Trace) Reset() { *m = Trace{} } func (m *Trace) String() string { return proto.CompactTextString(m) } func (*Trace) ProtoMessage() {} func (*Trace) Descriptor() ([]byte, []int) { - return fileDescriptor_f22805646f4f62b6, []int{2} + return fileDescriptor_f22805646f4f62b6, []int{10} } func (m *Trace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -194,7 +619,7 @@ func (m *PushRequest) Reset() { *m = PushRequest{} } func (m *PushRequest) String() string { return proto.CompactTextString(m) } func (*PushRequest) ProtoMessage() {} func (*PushRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f22805646f4f62b6, []int{3} + return fileDescriptor_f22805646f4f62b6, []int{11} } func (m *PushRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -237,7 +662,7 @@ func (m *PushResponse) Reset() { *m = PushResponse{} } func (m *PushResponse) String() string { return proto.CompactTextString(m) } func (*PushResponse) ProtoMessage() {} func (*PushResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f22805646f4f62b6, []int{4} + return fileDescriptor_f22805646f4f62b6, []int{12} } func (m *PushResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,13 +698,15 @@ type PushBytesRequest struct { Traces []PreallocBytes `protobuf:"bytes,2,rep,name=traces,proto3,customtype=PreallocBytes" json:"traces"` // trace ids. 
length must match traces Ids []PreallocBytes `protobuf:"bytes,3,rep,name=ids,proto3,customtype=PreallocBytes" json:"ids"` + // search data, length must match traces + SearchData []PreallocBytes `protobuf:"bytes,4,rep,name=searchData,proto3,customtype=PreallocBytes" json:"searchData"` } func (m *PushBytesRequest) Reset() { *m = PushBytesRequest{} } func (m *PushBytesRequest) String() string { return proto.CompactTextString(m) } func (*PushBytesRequest) ProtoMessage() {} func (*PushBytesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f22805646f4f62b6, []int{5} + return fileDescriptor_f22805646f4f62b6, []int{13} } func (m *PushBytesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -317,7 +744,7 @@ func (m *TraceBytes) Reset() { *m = TraceBytes{} } func (m *TraceBytes) String() string { return proto.CompactTextString(m) } func (*TraceBytes) ProtoMessage() {} func (*TraceBytes) Descriptor() ([]byte, []int) { - return fileDescriptor_f22805646f4f62b6, []int{6} + return fileDescriptor_f22805646f4f62b6, []int{14} } func (m *TraceBytes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,6 +783,15 @@ func (m *TraceBytes) GetTraces() [][]byte { func init() { proto.RegisterType((*TraceByIDRequest)(nil), "tempopb.TraceByIDRequest") proto.RegisterType((*TraceByIDResponse)(nil), "tempopb.TraceByIDResponse") + proto.RegisterType((*SearchRequest)(nil), "tempopb.SearchRequest") + proto.RegisterMapType((map[string]string)(nil), "tempopb.SearchRequest.TagsEntry") + proto.RegisterType((*SearchResponse)(nil), "tempopb.SearchResponse") + proto.RegisterType((*TraceSearchMetadata)(nil), "tempopb.TraceSearchMetadata") + proto.RegisterType((*SearchMetrics)(nil), "tempopb.SearchMetrics") + proto.RegisterType((*SearchTagsRequest)(nil), "tempopb.SearchTagsRequest") + proto.RegisterType((*SearchTagsResponse)(nil), "tempopb.SearchTagsResponse") + proto.RegisterType((*SearchTagValuesRequest)(nil), "tempopb.SearchTagValuesRequest") + proto.RegisterType((*SearchTagValuesResponse)(nil), "tempopb.SearchTagValuesResponse") proto.RegisterType((*Trace)(nil), "tempopb.Trace") proto.RegisterType((*PushRequest)(nil), "tempopb.PushRequest") proto.RegisterType((*PushResponse)(nil), "tempopb.PushResponse") @@ -366,38 +802,62 @@ func init() { func init() { proto.RegisterFile("pkg/tempopb/tempo.proto", fileDescriptor_f22805646f4f62b6) } var fileDescriptor_f22805646f4f62b6 = []byte{ - // 483 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0x41, 0x6f, 0xd3, 0x4c, - 0x10, 0xcd, 0x36, 0x5f, 0x92, 0x66, 0x92, 0x56, 0xfd, 0x56, 0x2d, 0x5d, 0x2c, 0xe4, 0x44, 0x56, - 0x25, 0x72, 0xa9, 0xad, 0xa6, 0xea, 0xa1, 0x27, 0x24, 0x2b, 0x20, 0x7a, 0x40, 0x2a, 0x0e, 0x7f, - 0xc0, 0x76, 0x96, 0x24, 0x6a, 0x9a, 0x75, 0x77, 0xd7, 0x95, 0x72, 0xe3, 0xc4, 0x99, 0x7f, 0xc0, - 0xdf, 0xe9, 0xb1, 0x47, 0xc4, 0xa1, 0x42, 0xc9, 0x1f, 0x41, 0x9e, 0xb5, 0x8d, 0xa9, 0x0a, 0x9c, - 0x32, 0xf3, 0xde, 0x9b, 0x37, 0xe3, 0xa7, 0x0d, 0x1c, 0x26, 0x57, 0x53, 0x4f, 0xf3, 0xeb, 0x44, - 0x24, 0x91, 0xf9, 0x75, 0x13, 0x29, 0xb4, 0xa0, 0xad, 0x1c, 0xb4, 0xf6, 0xb5, 0x0c, 0x63, 0xee, - 0xdd, 0x9e, 0x78, 0x58, 0x18, 0xda, 0x3a, 0x9e, 0xce, 0xf5, 0x2c, 0x8d, 0xdc, 0x58, 0x5c, 0x7b, - 0x53, 0x31, 0x15, 0x1e, 0xc2, 0x51, 0xfa, 0x11, 0x3b, 0x6c, 0xb0, 0x32, 0x72, 0xe7, 0x33, 0x81, - 0xbd, 0x0f, 0xd9, 0xb8, 0xbf, 0xba, 0x18, 0x05, 0xfc, 0x26, 0xe5, 0x4a, 0x53, 0x06, 0x2d, 0xb4, - 0xbc, 0x18, 0x31, 0xd2, 0x27, 0x83, 0x6e, 0x50, 0xb4, 0xd4, 0x06, 0x88, 0x16, 0x22, 0xbe, 0x1a, - 0xeb, 
0x50, 0x6a, 0xb6, 0xd5, 0x27, 0x83, 0x76, 0x50, 0x41, 0xa8, 0x05, 0xdb, 0xd8, 0xbd, 0x5e, - 0x4e, 0x58, 0x1d, 0xd9, 0xb2, 0xa7, 0x2f, 0xa0, 0x7d, 0x93, 0x72, 0xb9, 0x7a, 0x27, 0x26, 0x9c, - 0x35, 0x90, 0xfc, 0x05, 0x38, 0xe7, 0xf0, 0x7f, 0xe5, 0x0e, 0x95, 0x88, 0xa5, 0xe2, 0xf4, 0x08, - 0x1a, 0xb8, 0x19, 0xcf, 0xe8, 0x0c, 0x77, 0xdd, 0xfc, 0xdb, 0x5d, 0x94, 0x06, 0x86, 0x74, 0x7c, - 0x68, 0x60, 0x4f, 0xcf, 0xa1, 0x15, 0x85, 0x3a, 0x9e, 0x71, 0xc5, 0x48, 0xbf, 0x3e, 0xe8, 0x0c, - 0x7b, 0xe5, 0x80, 0x89, 0xe8, 0xf6, 0xc4, 0x0d, 0xb8, 0x12, 0xa9, 0x8c, 0xf9, 0x38, 0x09, 0x97, - 0x2a, 0x28, 0xf4, 0xce, 0x08, 0x3a, 0x97, 0xa9, 0x9a, 0x15, 0x09, 0x9c, 0x41, 0x03, 0x99, 0x7c, - 0xf1, 0x3f, 0x7d, 0x8c, 0xda, 0xd9, 0x85, 0xae, 0x71, 0x31, 0xf7, 0x3b, 0x5f, 0x09, 0xec, 0x65, - 0x80, 0xbf, 0xd2, 0x5c, 0x15, 0xde, 0xa7, 0xb0, 0x2d, 0x4d, 0x69, 0xce, 0xec, 0xfa, 0x87, 0x77, - 0x0f, 0xbd, 0xda, 0xf7, 0x87, 0xde, 0xce, 0xa5, 0xe4, 0xe1, 0x62, 0x21, 0x62, 0xd4, 0x33, 0x12, - 0x94, 0x42, 0x7a, 0x0c, 0x4d, 0x5c, 0xad, 0xd8, 0x16, 0x8e, 0x1c, 0x3c, 0x39, 0x12, 0xe4, 0x22, - 0xfa, 0x12, 0xea, 0xf3, 0x89, 0x62, 0xf5, 0xbf, 0x69, 0x33, 0x85, 0x73, 0x04, 0x90, 0xc7, 0xae, - 0xb9, 0xa2, 0xcf, 0xca, 0x2d, 0x78, 0x58, 0x61, 0x37, 0xfc, 0x44, 0xa0, 0x99, 0x7d, 0x07, 0x97, - 0xf4, 0x0c, 0xfe, 0xcb, 0x2a, 0xba, 0x5f, 0x46, 0x52, 0xc9, 0xcd, 0x3a, 0x78, 0x84, 0xe6, 0x39, - 0xd4, 0xe8, 0x2b, 0x68, 0x97, 0x41, 0xd0, 0xe7, 0xbf, 0xa9, 0xaa, 0xe1, 0xfc, 0xd1, 0x60, 0x38, - 0x86, 0xd6, 0xfb, 0x94, 0xcb, 0x39, 0x97, 0xf4, 0x2d, 0xec, 0xbc, 0x99, 0x2f, 0x27, 0xe5, 0x73, - 0xa9, 0xf8, 0x3d, 0x7e, 0xca, 0x96, 0xf5, 0x14, 0x55, 0x98, 0xfa, 0xec, 0x6e, 0x6d, 0x93, 0xfb, - 0xb5, 0x4d, 0x7e, 0xac, 0x6d, 0xf2, 0x65, 0x63, 0xd7, 0xee, 0x37, 0x76, 0xed, 0xdb, 0xc6, 0xae, - 0x45, 0x4d, 0xfc, 0x7b, 0x9c, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x16, 0x85, 0xb9, 0x7e, 0x87, - 0x03, 0x00, 0x00, + // 880 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0x4f, 0x8f, 0xda, 0x46, + 0x14, 0xc7, 0x0b, 0x2c, 0xeb, 0xc7, 0x9f, 0xec, 0x4e, 0x36, 0xe0, 0xba, 0x2b, 0x40, 0xd6, 0xaa, + 0xe5, 0xd0, 0x40, 0x42, 0xba, 0x4a, 0x93, 0x1e, 0x2a, 0x21, 0xd2, 0x36, 0x52, 0x89, 0x52, 0x43, + 0x73, 0x1f, 0xcc, 0x94, 0xb5, 0x16, 0x6c, 0x32, 0x1e, 0xa3, 0xe5, 0xd6, 0x53, 0x2f, 0xbd, 0xf4, + 0xab, 0xf4, 0x5b, 0xe4, 0x52, 0x29, 0xa7, 0xaa, 0xea, 0x21, 0xaa, 0x76, 0xa5, 0x7e, 0x8e, 0x6a, + 0x66, 0xec, 0xc1, 0x36, 0x24, 0x39, 0x31, 0xef, 0xf7, 0x7e, 0xef, 0xf9, 0xbd, 0xdf, 0xbc, 0x37, + 0x40, 0x63, 0x75, 0x35, 0xef, 0x31, 0xb2, 0x5c, 0xf9, 0xab, 0xa9, 0xfc, 0xed, 0xae, 0xa8, 0xcf, + 0x7c, 0x54, 0x8a, 0x40, 0xf3, 0x94, 0x51, 0xec, 0x90, 0xde, 0xfa, 0x61, 0x4f, 0x1c, 0xa4, 0xdb, + 0xbc, 0x3f, 0x77, 0xd9, 0x65, 0x38, 0xed, 0x3a, 0xfe, 0xb2, 0x37, 0xf7, 0xe7, 0x7e, 0x4f, 0xc0, + 0xd3, 0xf0, 0x67, 0x61, 0x09, 0x43, 0x9c, 0x24, 0xdd, 0xfa, 0x55, 0x83, 0xe3, 0x09, 0x0f, 0x1f, + 0x6c, 0x9e, 0x0f, 0x6d, 0xf2, 0x3a, 0x24, 0x01, 0x43, 0x06, 0x94, 0x44, 0xca, 0xe7, 0x43, 0x43, + 0x6b, 0x6b, 0x9d, 0x8a, 0x1d, 0x9b, 0xa8, 0x09, 0x30, 0x5d, 0xf8, 0xce, 0xd5, 0x98, 0x61, 0xca, + 0x8c, 0x83, 0xb6, 0xd6, 0xd1, 0xed, 0x04, 0x82, 0x4c, 0x38, 0x12, 0xd6, 0x33, 0x6f, 0x66, 0xe4, + 0x85, 0x57, 0xd9, 0xe8, 0x0c, 0xf4, 0xd7, 0x21, 0xa1, 0x9b, 0x91, 0x3f, 0x23, 0x46, 0x51, 0x38, + 0xb7, 0x80, 0xf5, 0x04, 0x4e, 0x12, 0x75, 0x04, 0x2b, 0xdf, 0x0b, 0x08, 0x3a, 0x87, 0xa2, 0xf8, + 0xb2, 0x28, 0xa3, 0xdc, 0xaf, 0x75, 0xa3, 0xde, 0xbb, 0x82, 0x6a, 0x4b, 0xa7, 0xf5, 0x9f, 0x06, + 0xd5, 0x31, 0xc1, 0xd4, 0xb9, 0x8c, 0x1b, 0x78, 0x0a, 0x85, 
0x09, 0x9e, 0x07, 0x86, 0xd6, 0xce, + 0x77, 0xca, 0xfd, 0xb6, 0x0a, 0x4b, 0xb1, 0xba, 0x9c, 0xf2, 0xcc, 0x63, 0x74, 0x33, 0x28, 0xbc, + 0x79, 0xd7, 0xca, 0xd9, 0x22, 0x06, 0x9d, 0x43, 0x75, 0xe4, 0x7a, 0xc3, 0x90, 0x62, 0xe6, 0xfa, + 0xde, 0x28, 0x10, 0x5d, 0x56, 0xed, 0x34, 0x28, 0x58, 0xf8, 0x3a, 0xc1, 0xca, 0x47, 0xac, 0x24, + 0x88, 0x4e, 0xa1, 0xf8, 0x83, 0xbb, 0x74, 0x99, 0x51, 0x10, 0x5e, 0x69, 0x98, 0x8f, 0x41, 0x57, + 0x9f, 0x46, 0xc7, 0x90, 0xbf, 0x22, 0x1b, 0xd1, 0xa0, 0x6e, 0xf3, 0x23, 0x0f, 0x5a, 0xe3, 0x45, + 0x48, 0x22, 0x79, 0xa5, 0xf1, 0xf4, 0xe0, 0x2b, 0xcd, 0xba, 0x86, 0x5a, 0xdc, 0x41, 0x24, 0xd0, + 0x97, 0x70, 0x28, 0x34, 0x88, 0x5b, 0x3d, 0x4b, 0x2b, 0x24, 0xd9, 0x23, 0xc2, 0xf0, 0x0c, 0x33, + 0x6c, 0x47, 0x5c, 0xf4, 0x00, 0x4a, 0x4b, 0xc2, 0xa8, 0xeb, 0xc8, 0xe6, 0xca, 0xfd, 0x7a, 0x46, + 0xa1, 0x91, 0xf4, 0xda, 0x31, 0xcd, 0xfa, 0x53, 0x83, 0xbb, 0x7b, 0x32, 0x66, 0x27, 0x45, 0xdf, + 0x4e, 0x4a, 0x07, 0xee, 0x50, 0xdf, 0x67, 0x63, 0x42, 0xd7, 0xae, 0x43, 0x5e, 0xe0, 0x65, 0xdc, + 0x4f, 0x16, 0xe6, 0x52, 0x72, 0x48, 0xa4, 0x17, 0x3c, 0x39, 0x38, 0x69, 0x10, 0x7d, 0x01, 0x27, + 0x01, 0x1f, 0xb1, 0x89, 0xbb, 0x24, 0x3f, 0x79, 0xee, 0xf5, 0x0b, 0xec, 0xf9, 0x42, 0xd6, 0x82, + 0xbd, 0xeb, 0xe0, 0x73, 0x3a, 0xdb, 0xde, 0x4d, 0x51, 0xa8, 0x9f, 0x40, 0xac, 0xdf, 0xd4, 0xc8, + 0x44, 0xad, 0xf2, 0x7a, 0x5d, 0x2f, 0x58, 0x11, 0x87, 0x91, 0xd9, 0x24, 0x96, 0x94, 0x87, 0x65, + 0x61, 0xf4, 0x19, 0xd4, 0x14, 0x34, 0xd8, 0x30, 0x22, 0x45, 0x2c, 0xd8, 0x19, 0x34, 0x95, 0x71, + 0xc0, 0x97, 0x20, 0x1e, 0x92, 0x2c, 0x6c, 0xdd, 0x85, 0x13, 0x59, 0x0c, 0x1f, 0x8b, 0x68, 0x3a, + 0xad, 0x07, 0x80, 0x92, 0x60, 0x74, 0xe1, 0x26, 0x1c, 0x31, 0x3c, 0xe7, 0x8a, 0xc8, 0x2b, 0xd7, + 0x6d, 0x65, 0x5b, 0x7d, 0xa8, 0xab, 0x88, 0x57, 0x7c, 0x68, 0x82, 0xe4, 0x42, 0x4b, 0x96, 0xba, + 0x26, 0x69, 0x5a, 0x8f, 0xa1, 0xb1, 0x13, 0x13, 0x7d, 0xea, 0x0c, 0x74, 0x16, 0x83, 0xd1, 0xb7, + 0xb6, 0x80, 0x35, 0x80, 0xa2, 0xd0, 0x03, 0x3d, 0x81, 0xd2, 0x14, 0x33, 0xe7, 0x52, 0xcd, 0x60, + 0x4b, 0x0d, 0x93, 0x7c, 0x97, 0xd6, 0x0f, 0xbb, 0x36, 0x09, 0xfc, 0x90, 0x3a, 0x64, 0xbc, 0xc2, + 0x5e, 0x60, 0xc7, 0x7c, 0x6b, 0x08, 0xe5, 0x97, 0x61, 0xa0, 0xb6, 0xf6, 0x02, 0x8a, 0xc2, 0x13, + 0x6d, 0xfb, 0x47, 0xf3, 0x48, 0xb6, 0x55, 0x83, 0x8a, 0xcc, 0x22, 0xeb, 0xb6, 0xfe, 0xd2, 0xe0, + 0x98, 0x03, 0xe2, 0x16, 0xe2, 0xdc, 0x8f, 0xe0, 0x88, 0xca, 0xa3, 0x2c, 0xb3, 0x32, 0x68, 0xf0, + 0x9d, 0xff, 0xe7, 0x5d, 0xab, 0xfa, 0x92, 0x12, 0xbc, 0x58, 0xf8, 0x8e, 0xbc, 0x4b, 0xcd, 0x56, + 0x44, 0x74, 0x5f, 0x6d, 0xd7, 0x81, 0x08, 0xb9, 0xb7, 0x37, 0x44, 0xad, 0xd5, 0xe7, 0x90, 0x77, + 0x67, 0xfc, 0x92, 0x3f, 0xc0, 0xe5, 0x0c, 0x74, 0x01, 0x10, 0x08, 0xd1, 0x87, 0x98, 0x61, 0xa3, + 0xf0, 0x21, 0x7e, 0x82, 0x68, 0x9d, 0x03, 0x44, 0x4f, 0x24, 0x1f, 0xaf, 0x7a, 0x6a, 0xf5, 0x2b, + 0x71, 0x15, 0xfd, 0x5f, 0x34, 0x38, 0xe4, 0xed, 0x13, 0x8a, 0x2e, 0xa0, 0xc0, 0x4f, 0xe8, 0x54, + 0x29, 0x99, 0x90, 0xdb, 0xbc, 0x97, 0x41, 0x23, 0xf9, 0x72, 0xe8, 0x1b, 0xd0, 0x95, 0x7e, 0xe8, + 0x93, 0x14, 0x2b, 0xa9, 0xe9, 0x7b, 0x13, 0xf4, 0xff, 0x38, 0x80, 0xd2, 0x8f, 0x21, 0xa1, 0x2e, + 0xa1, 0xe8, 0x7b, 0xa8, 0x7e, 0xeb, 0x7a, 0x33, 0xf5, 0xb6, 0x27, 0x12, 0x66, 0xff, 0x77, 0x4c, + 0x73, 0x9f, 0x4b, 0x95, 0xf5, 0x35, 0x1c, 0xca, 0x51, 0x45, 0xf5, 0xfd, 0x0f, 0xba, 0xd9, 0xd8, + 0xc1, 0x55, 0xf0, 0x77, 0x00, 0xdb, 0x6d, 0x42, 0x66, 0x86, 0x98, 0xd8, 0x3b, 0xf3, 0xd3, 0xbd, + 0x3e, 0x95, 0xe8, 0x15, 0xdc, 0xc9, 0x2c, 0x0c, 0x6a, 0xed, 0x46, 0xa4, 0xd6, 0xcf, 0x6c, 0xbf, + 0x9f, 0x10, 0xe7, 0x1d, 0x18, 0x6f, 0x6e, 0x9a, 0xda, 0xdb, 0x9b, 0xa6, 0xf6, 0xef, 
0x4d, 0x53, + 0xfb, 0xfd, 0xb6, 0x99, 0x7b, 0x7b, 0xdb, 0xcc, 0xfd, 0x7d, 0xdb, 0xcc, 0x4d, 0x0f, 0xc5, 0x3f, + 0xf5, 0xa3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x73, 0x50, 0xfc, 0xa3, 0x12, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -521,6 +981,9 @@ var _Pusher_serviceDesc = grpc.ServiceDesc{ // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type QuerierClient interface { FindTraceByID(ctx context.Context, in *TraceByIDRequest, opts ...grpc.CallOption) (*TraceByIDResponse, error) + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + SearchTags(ctx context.Context, in *SearchTagsRequest, opts ...grpc.CallOption) (*SearchTagsResponse, error) + SearchTagValues(ctx context.Context, in *SearchTagValuesRequest, opts ...grpc.CallOption) (*SearchTagValuesResponse, error) } type querierClient struct { @@ -540,9 +1003,39 @@ func (c *querierClient) FindTraceByID(ctx context.Context, in *TraceByIDRequest, return out, nil } -// QuerierServer is the server API for Querier service. +func (c *querierClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := c.cc.Invoke(ctx, "/tempopb.Querier/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *querierClient) SearchTags(ctx context.Context, in *SearchTagsRequest, opts ...grpc.CallOption) (*SearchTagsResponse, error) { + out := new(SearchTagsResponse) + err := c.cc.Invoke(ctx, "/tempopb.Querier/SearchTags", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *querierClient) SearchTagValues(ctx context.Context, in *SearchTagValuesRequest, opts ...grpc.CallOption) (*SearchTagValuesResponse, error) { + out := new(SearchTagValuesResponse) + err := c.cc.Invoke(ctx, "/tempopb.Querier/SearchTagValues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QuerierServer is the server API for Querier service. type QuerierServer interface { FindTraceByID(context.Context, *TraceByIDRequest) (*TraceByIDResponse, error) + Search(context.Context, *SearchRequest) (*SearchResponse, error) + SearchTags(context.Context, *SearchTagsRequest) (*SearchTagsResponse, error) + SearchTagValues(context.Context, *SearchTagValuesRequest) (*SearchTagValuesResponse, error) } // UnimplementedQuerierServer can be embedded to have forward compatible implementations. 
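The new Querier surface now spans trace lookup plus search: Search with tag, duration, and limit filters, SearchTags to list tag names, and SearchTagValues to list the values of one tag. A minimal sketch of driving the generated client end to end — the localhost:9095 address and the NewQuerierClient constructor (the standard gogo/grpc generated constructor, not shown in this hunk) are assumptions for illustration:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/grafana/tempo/pkg/tempopb"
)

func main() {
	// Assumed address of a querier serving the tempopb.Querier service.
	conn, err := grpc.Dial("localhost:9095", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := tempopb.NewQuerierClient(conn)

	// Tags is a case-insensitive partial match; Limit caps returned traces.
	resp, err := client.Search(context.Background(), &tempopb.SearchRequest{
		Tags:          map[string]string{"service.name": "my-service"},
		MinDurationMs: 100,
		Limit:         20,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range resp.Traces {
		fmt.Printf("%s %s %dms\n", t.TraceID, t.RootServiceName, t.DurationMs)
	}

	// Enumerate the values seen for a single tag name.
	tv, err := client.SearchTagValues(context.Background(),
		&tempopb.SearchTagValuesRequest{TagName: "service.name"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tv.TagValues)
}

The SearchResponse also carries SearchMetrics (inspectedTraces, inspectedBytes, inspectedBlocks), so callers can report how much data a search touched.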
@@ -552,6 +1045,15 @@ type UnimplementedQuerierServer struct { func (*UnimplementedQuerierServer) FindTraceByID(ctx context.Context, req *TraceByIDRequest) (*TraceByIDResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FindTraceByID not implemented") } +func (*UnimplementedQuerierServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedQuerierServer) SearchTags(ctx context.Context, req *SearchTagsRequest) (*SearchTagsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SearchTags not implemented") +} +func (*UnimplementedQuerierServer) SearchTagValues(ctx context.Context, req *SearchTagValuesRequest) (*SearchTagValuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SearchTagValues not implemented") +} func RegisterQuerierServer(s *grpc.Server, srv QuerierServer) { s.RegisterService(&_Querier_serviceDesc, srv) @@ -575,6 +1077,60 @@ func _Querier_FindTraceByID_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Querier_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QuerierServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tempopb.Querier/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QuerierServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Querier_SearchTags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchTagsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QuerierServer).SearchTags(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tempopb.Querier/SearchTags", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QuerierServer).SearchTags(ctx, req.(*SearchTagsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Querier_SearchTagValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchTagValuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QuerierServer).SearchTagValues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tempopb.Querier/SearchTagValues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QuerierServer).SearchTagValues(ctx, req.(*SearchTagValuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Querier_serviceDesc = grpc.ServiceDesc{ ServiceName: "tempopb.Querier", HandlerType: (*QuerierServer)(nil), @@ -583,6 +1139,18 @@ var _Querier_serviceDesc = grpc.ServiceDesc{ MethodName: "FindTraceByID", Handler: _Querier_FindTraceByID_Handler, }, + { + MethodName: "Search", + Handler: _Querier_Search_Handler, + }, + { + MethodName: "SearchTags", + Handler: _Querier_SearchTags_Handler, + }, + { + MethodName: "SearchTagValues", + Handler: 
_Querier_SearchTagValues_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/tempopb/tempo.proto", @@ -674,7 +1242,7 @@ func (m *TraceByIDResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Trace) Marshal() (dAtA []byte, err error) { +func (m *SearchRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -684,26 +1252,46 @@ func (m *Trace) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Trace) MarshalTo(dAtA []byte) (int, error) { +func (m *SearchRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Trace) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SearchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Batches) > 0 { - for iNdEx := len(m.Batches) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Batches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTempo(dAtA, i, uint64(size)) - } + if m.Limit != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 + } + if m.MaxDurationMs != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.MaxDurationMs)) + i-- + dAtA[i] = 0x18 + } + if m.MinDurationMs != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.MinDurationMs)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tags) > 0 { + for k := range m.Tags { + v := m.Tags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintTempo(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTempo(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTempo(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0xa } @@ -711,7 +1299,7 @@ func (m *Trace) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PushRequest) Marshal() (dAtA []byte, err error) { +func (m *SearchResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -721,19 +1309,19 @@ func (m *PushRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PushRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *SearchResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PushRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SearchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Batch != nil { + if m.Metrics != nil { { - size, err := m.Batch.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Metrics.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -741,12 +1329,26 @@ func (m *PushRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTempo(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + } + if len(m.Traces) > 0 { + for iNdEx := len(m.Traces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Traces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *PushResponse) Marshal() (dAtA []byte, err error) { +func (m *TraceSearchMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -756,20 +1358,51 @@ func 
(m *PushResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PushResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *TraceSearchMetadata) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PushResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TraceSearchMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.DurationMs != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.DurationMs)) + i-- + dAtA[i] = 0x28 + } + if m.StartTimeUnixNano != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x20 + } + if len(m.RootTraceName) > 0 { + i -= len(m.RootTraceName) + copy(dAtA[i:], m.RootTraceName) + i = encodeVarintTempo(dAtA, i, uint64(len(m.RootTraceName))) + i-- + dAtA[i] = 0x1a + } + if len(m.RootServiceName) > 0 { + i -= len(m.RootServiceName) + copy(dAtA[i:], m.RootServiceName) + i = encodeVarintTempo(dAtA, i, uint64(len(m.RootServiceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.TraceID) > 0 { + i -= len(m.TraceID) + copy(dAtA[i:], m.TraceID) + i = encodeVarintTempo(dAtA, i, uint64(len(m.TraceID))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *PushBytesRequest) Marshal() (dAtA []byte, err error) { +func (m *SearchMetrics) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -779,62 +1412,35 @@ func (m *PushBytesRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PushBytesRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *SearchMetrics) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PushBytesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SearchMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Ids) > 0 { - for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Ids[iNdEx].Size() - i -= size - if _, err := m.Ids[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTempo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + if m.InspectedBlocks != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.InspectedBlocks)) + i-- + dAtA[i] = 0x18 } - if len(m.Traces) > 0 { - for iNdEx := len(m.Traces) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Traces[iNdEx].Size() - i -= size - if _, err := m.Traces[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTempo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + if m.InspectedBytes != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.InspectedBytes)) + i-- + dAtA[i] = 0x10 } - if len(m.Requests) > 0 { - for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Requests[iNdEx].Size() - i -= size - if _, err := m.Requests[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintTempo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if m.InspectedTraces != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.InspectedTraces)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *TraceBytes) Marshal() (dAtA []byte, err error) { +func (m *SearchTagsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -844,163 +1450,1406 @@ func (m *TraceBytes) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TraceBytes) 
MarshalTo(dAtA []byte) (int, error) { +func (m *SearchTagsRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TraceBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SearchTagsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Traces) > 0 { - for iNdEx := len(m.Traces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Traces[iNdEx]) - copy(dAtA[i:], m.Traces[iNdEx]) - i = encodeVarintTempo(dAtA, i, uint64(len(m.Traces[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func encodeVarintTempo(dAtA []byte, offset int, v uint64) int { - offset -= sovTempo(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *SearchTagsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *TraceByIDRequest) Size() (n int) { - if m == nil { - return 0 - } + +func (m *SearchTagsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SearchTagsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TraceID) - if l > 0 { - n += 1 + l + sovTempo(uint64(l)) - } - l = len(m.BlockStart) - if l > 0 { - n += 1 + l + sovTempo(uint64(l)) - } - l = len(m.BlockEnd) - if l > 0 { - n += 1 + l + sovTempo(uint64(l)) - } - l = len(m.QueryMode) - if l > 0 { - n += 1 + l + sovTempo(uint64(l)) + if len(m.TagNames) > 0 { + for iNdEx := len(m.TagNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TagNames[iNdEx]) + copy(dAtA[i:], m.TagNames[iNdEx]) + i = encodeVarintTempo(dAtA, i, uint64(len(m.TagNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *TraceByIDResponse) Size() (n int) { - if m == nil { - return 0 +func (m *SearchTagValuesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SearchTagValuesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SearchTagValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Trace != nil { - l = m.Trace.Size() - n += 1 + l + sovTempo(uint64(l)) + if len(m.TagName) > 0 { + i -= len(m.TagName) + copy(dAtA[i:], m.TagName) + i = encodeVarintTempo(dAtA, i, uint64(len(m.TagName))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Trace) Size() (n int) { - if m == nil { - return 0 +func (m *SearchTagValuesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SearchTagValuesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SearchTagValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TagValues) > 0 { + for iNdEx := len(m.TagValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TagValues[iNdEx]) + copy(dAtA[i:], m.TagValues[iNdEx]) + i = 
encodeVarintTempo(dAtA, i, uint64(len(m.TagValues[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Trace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Trace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Trace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.Batches) > 0 { - for _, e := range m.Batches { - l = e.Size() - n += 1 + l + sovTempo(uint64(l)) + for iNdEx := len(m.Batches) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Batches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *PushRequest) Size() (n int) { - if m == nil { - return 0 +func (m *PushRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *PushRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PushRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if m.Batch != nil { - l = m.Batch.Size() - n += 1 + l + sovTempo(uint64(l)) + { + size, err := m.Batch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *PushResponse) Size() (n int) { - if m == nil { - return 0 +func (m *PushResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *PushResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PushResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - return n + return len(dAtA) - i, nil } -func (m *PushBytesRequest) Size() (n int) { - if m == nil { - return 0 +func (m *PushBytesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *PushBytesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PushBytesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Requests) > 0 { - for _, e := range m.Requests { - l = e.Size() - n += 1 + l + sovTempo(uint64(l)) + if len(m.SearchData) > 0 { + for iNdEx := len(m.SearchData) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.SearchData[iNdEx].Size() + i -= size + if _, err := m.SearchData[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Ids) > 0 { + for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Ids[iNdEx].Size() + i -= size + if _, err := m.Ids[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i 
= encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } if len(m.Traces) > 0 { - for _, e := range m.Traces { - l = e.Size() - n += 1 + l + sovTempo(uint64(l)) + for iNdEx := len(m.Traces) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Traces[iNdEx].Size() + i -= size + if _, err := m.Traces[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if len(m.Ids) > 0 { - for _, e := range m.Ids { - l = e.Size() - n += 1 + l + sovTempo(uint64(l)) + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Requests[iNdEx].Size() + i -= size + if _, err := m.Requests[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *TraceBytes) Size() (n int) { +func (m *TraceBytes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceBytes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Traces) > 0 { + for iNdEx := len(m.Traces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Traces[iNdEx]) + copy(dAtA[i:], m.Traces[iNdEx]) + i = encodeVarintTempo(dAtA, i, uint64(len(m.Traces[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintTempo(dAtA []byte, offset int, v uint64) int { + offset -= sovTempo(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TraceByIDRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Traces) > 0 { - for _, b := range m.Traces { - l = len(b) - n += 1 + l + sovTempo(uint64(l)) + l = len(m.TraceID) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + l = len(m.BlockStart) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + l = len(m.BlockEnd) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + l = len(m.QueryMode) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + return n +} + +func (m *TraceByIDResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Trace != nil { + l = m.Trace.Size() + n += 1 + l + sovTempo(uint64(l)) + } + return n +} + +func (m *SearchRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tags) > 0 { + for k, v := range m.Tags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTempo(uint64(len(k))) + 1 + len(v) + sovTempo(uint64(len(v))) + n += mapEntrySize + 1 + sovTempo(uint64(mapEntrySize)) + } + } + if m.MinDurationMs != 0 { + n += 1 + sovTempo(uint64(m.MinDurationMs)) + } + if m.MaxDurationMs != 0 { + n += 1 + sovTempo(uint64(m.MaxDurationMs)) + } + if m.Limit != 0 { + n += 1 + sovTempo(uint64(m.Limit)) + } + return n +} + +func (m *SearchResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Traces) > 0 { + for _, e := range m.Traces { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + if m.Metrics != nil { + l = m.Metrics.Size() + n += 1 + l + sovTempo(uint64(l)) + } + return n +} + +func (m *TraceSearchMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
l = len(m.TraceID) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + l = len(m.RootServiceName) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + l = len(m.RootTraceName) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + if m.StartTimeUnixNano != 0 { + n += 1 + sovTempo(uint64(m.StartTimeUnixNano)) + } + if m.DurationMs != 0 { + n += 1 + sovTempo(uint64(m.DurationMs)) + } + return n +} + +func (m *SearchMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InspectedTraces != 0 { + n += 1 + sovTempo(uint64(m.InspectedTraces)) + } + if m.InspectedBytes != 0 { + n += 1 + sovTempo(uint64(m.InspectedBytes)) + } + if m.InspectedBlocks != 0 { + n += 1 + sovTempo(uint64(m.InspectedBlocks)) + } + return n +} + +func (m *SearchTagsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *SearchTagsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TagNames) > 0 { + for _, s := range m.TagNames { + l = len(s) + n += 1 + l + sovTempo(uint64(l)) + } + } + return n +} + +func (m *SearchTagValuesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TagName) + if l > 0 { + n += 1 + l + sovTempo(uint64(l)) + } + return n +} + +func (m *SearchTagValuesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TagValues) > 0 { + for _, s := range m.TagValues { + l = len(s) + n += 1 + l + sovTempo(uint64(l)) + } + } + return n +} + +func (m *Trace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Batches) > 0 { + for _, e := range m.Batches { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + return n +} + +func (m *PushRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Batch != nil { + l = m.Batch.Size() + n += 1 + l + sovTempo(uint64(l)) + } + return n +} + +func (m *PushResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PushBytesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + if len(m.Traces) > 0 { + for _, e := range m.Traces { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + if len(m.Ids) > 0 { + for _, e := range m.Ids { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + if len(m.SearchData) > 0 { + for _, e := range m.SearchData { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + return n +} + +func (m *TraceBytes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Traces) > 0 { + for _, b := range m.Traces { + l = len(b) + n += 1 + l + sovTempo(uint64(l)) + } + } + return n +} + +func sovTempo(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTempo(x uint64) (n int) { + return sovTempo(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceByIDRequest: wiretype end group for non-group") 
+ } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceByIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceID = append(m.TraceID[:0], dAtA[iNdEx:postIndex]...) + if m.TraceID == nil { + m.TraceID = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockStart", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockStart = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockEnd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockEnd = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueryMode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TraceByIDResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceByIDResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceByIDResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Trace == nil { + m.Trace = &Trace{} + } + if err := m.Trace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SearchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SearchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SearchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tags == nil { + m.Tags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTempo + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey < 0 { + return ErrInvalidLengthTempo + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTempo + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthTempo + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Tags[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDurationMs", wireType) + } + m.MinDurationMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDurationMs |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDurationMs", wireType) + } + m.MaxDurationMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxDurationMs |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SearchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SearchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SearchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Traces", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Traces = append(m.Traces, &TraceSearchMetadata{}) + if err := m.Traces[len(m.Traces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = &SearchMetrics{} + } + if err := m.Metrics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TraceSearchMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TraceSearchMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TraceSearchMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootTraceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootTraceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimeUnixNano |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMs", wireType) + } + m.DurationMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMs |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - return n -} -func sovTempo(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTempo(x uint64) (n int) { - return sovTempo(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { +func (m *SearchMetrics) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1023,17 +2872,17 @@ func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TraceByIDRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SearchMetrics: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TraceByIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SearchMetrics: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InspectedTraces", wireType) } - var byteLen int + m.InspectedTraces = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTempo @@ -1043,31 +2892,35 @@ func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.InspectedTraces |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTempo - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return 
ErrInvalidLengthTempo - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InspectedBytes", wireType) } - m.TraceID = append(m.TraceID[:0], dAtA[iNdEx:postIndex]...) - if m.TraceID == nil { - m.TraceID = []byte{} + m.InspectedBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InspectedBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockStart", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InspectedBlocks", wireType) } - var stringLen uint64 + m.InspectedBlocks = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTempo @@ -1077,27 +2930,114 @@ func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.InspectedBlocks |= uint32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTempo } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SearchTagsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SearchTagsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SearchTagsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTempo } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.BlockStart = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SearchTagsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SearchTagsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SearchTagsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockEnd", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field TagNames", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1125,11 +3065,61 @@ func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.BlockEnd = string(dAtA[iNdEx:postIndex]) + m.TagNames = append(m.TagNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SearchTagValuesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SearchTagValuesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SearchTagValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TagName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1157,7 +3147,7 @@ func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.QueryMode = string(dAtA[iNdEx:postIndex]) + m.TagName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1180,7 +3170,7 @@ func (m *TraceByIDRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *TraceByIDResponse) Unmarshal(dAtA []byte) error { +func (m *SearchTagValuesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1203,17 +3193,17 @@ func (m *TraceByIDResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TraceByIDResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SearchTagValuesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TraceByIDResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SearchTagValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TagValues", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTempo @@ -1223,27 +3213,23 @@ func (m *TraceByIDResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTempo } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTempo } if postIndex > l { return 
io.ErrUnexpectedEOF } - if m.Trace == nil { - m.Trace = &Trace{} - } - if err := m.Trace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TagValues = append(m.TagValues, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -1620,6 +3606,41 @@ func (m *PushBytesRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SearchData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v PreallocBytes + m.SearchData = append(m.SearchData, v) + if err := m.SearchData[len(m.SearchData)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTempo(dAtA[iNdEx:]) diff --git a/pkg/tempopb/tempo.proto b/pkg/tempopb/tempo.proto index 2668018845e..6fd1025c698 100644 --- a/pkg/tempopb/tempo.proto +++ b/pkg/tempopb/tempo.proto @@ -12,6 +12,9 @@ service Pusher { service Querier { rpc FindTraceByID(TraceByIDRequest) returns (TraceByIDResponse) {}; + rpc Search(SearchRequest) returns (SearchResponse) {}; + rpc SearchTags(SearchTagsRequest) returns (SearchTagsResponse) {}; + rpc SearchTagValues(SearchTagValuesRequest) returns (SearchTagValuesResponse) {}; } // Read @@ -26,6 +29,48 @@ message TraceByIDResponse { Trace trace = 1; } +message SearchRequest { + // case insensitive partial match + map Tags = 1 [(gogoproto.nullable) = false]; + uint32 MinDurationMs = 2; + uint32 MaxDurationMs = 3; + uint32 Limit = 4; +} + +message SearchResponse { + repeated TraceSearchMetadata traces = 1; + SearchMetrics metrics = 2; +} + +message TraceSearchMetadata { + string traceID = 1; + string rootServiceName = 2; + string rootTraceName = 3; + uint64 startTimeUnixNano = 4; + uint32 durationMs = 5; +} + +message SearchMetrics { + uint32 inspectedTraces = 1; + uint64 inspectedBytes = 2; + uint32 inspectedBlocks = 3; +} + +message SearchTagsRequest { +} + +message SearchTagsResponse { + repeated string tagNames = 1; +} + +message SearchTagValuesRequest { + string tagName = 1; +} + +message SearchTagValuesResponse { + repeated string tagValues = 1; +} + message Trace { repeated tempopb.trace.v1.ResourceSpans batches = 1; } @@ -46,8 +91,21 @@ message PushBytesRequest { repeated bytes traces = 2 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocBytes"]; // trace ids. 
length must match traces repeated bytes ids = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocBytes"]; + // search data, length must match traces + repeated bytes searchData = 4 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocBytes"]; + //repeated SearchData searchData = 4 [(gogoproto.nullable) = false]; } +/*message SearchData { + map RootData = 1 [(gogoproto.nullable) = false]; +}*/ + + +/*message TraceHeader { + string rootSpanName = 1; + map rootSpanTags = 2; +}*/ + message TraceBytes { // pre-marshalled Traces repeated bytes traces = 1; diff --git a/pkg/tempopb/trace/v1/trace.pb.go b/pkg/tempopb/trace/v1/trace.pb.go index d967030711e..54c41763ce6 100644 --- a/pkg/tempopb/trace/v1/trace.pb.go +++ b/pkg/tempopb/trace/v1/trace.pb.go @@ -34,7 +34,7 @@ const ( // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 // Indicates that the span represents an internal operation within an application, - // as opposed to an operations happening at the boundaries. Default value. + // as opposed to an operation happening at the boundaries. Default value. Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 // Indicates that the span covers server-side handling of an RPC or other // remote network request. @@ -149,7 +149,7 @@ func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { } // For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#set-status +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type Status_StatusCode int32 const ( diff --git a/pkg/util/http.go b/pkg/util/http.go index 718a7cf795e..6c5e42d62cc 100644 --- a/pkg/util/http.go +++ b/pkg/util/http.go @@ -66,3 +66,11 @@ func HexStringToTraceID(id string) ([]byte, error) { return byteID, nil } + +// TraceIDToHexString converts a trace ID to its string representation and removes any leading zeros. +func TraceIDToHexString(byteID []byte) string { + id := hex.EncodeToString(byteID) + // remove leading zeros + id = strings.TrimLeft(id, "0") + return id +} diff --git a/pkg/util/http_test.go b/pkg/util/http_test.go index 0f1ffbe10e4..082394891c0 100644 --- a/pkg/util/http_test.go +++ b/pkg/util/http_test.go @@ -10,7 +10,6 @@ import ( ) func TestHexStringToTraceID(t *testing.T) { - tc := []struct { id string expected []byte @@ -60,6 +59,38 @@ func TestHexStringToTraceID(t *testing.T) { } } +func TestTraceIDToHexString(t *testing.T) { + tc := []struct { + byteID []byte + traceID string + }{ + { + byteID: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12}, + traceID: "12", + }, + { + byteID: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef}, + traceID: "1234567890abcdef", // 64 bit + }, + { + byteID: []byte{0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef}, + traceID: "1234567890abcdef1234567890abcdef", // 128 bit + }, + { + byteID: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xa0}, + traceID: "12a0", // trailing zero + }, + } + + for _, tt := range tc { + t.Run(tt.traceID, func(t *testing.T) { + actual := TraceIDToHexString(tt.byteID) + + assert.Equal(t, tt.traceID, actual) + }) + } +} + // For licensing reasons they strings exist in two packages. 
This test exists to make sure they don't // drift. func TestEquality(t *testing.T) { diff --git a/tempodb/encoding/appender_buffered_generic.go b/tempodb/encoding/appender_buffered_generic.go new file mode 100644 index 00000000000..a6886ca1881 --- /dev/null +++ b/tempodb/encoding/appender_buffered_generic.go @@ -0,0 +1,97 @@ +package encoding + +import ( + "context" + + "github.com/grafana/tempo/tempodb/encoding/common" +) + +// BufferedAppenderGeneric buffers objects into pages and builds a downsampled +// index +type BufferedAppenderGeneric struct { + // output writer + writer common.DataWriterGeneric + + // record keeping + records []common.Record + totalObjects int + currentOffset uint64 + currentRecord *common.Record + currentBytesWritten int + + // config + maxPageSize int +} + +// NewBufferedAppenderGeneric returns a BufferedAppenderGeneric. This appender writes to +// the provided writer and also builds a downsampled records slice. +func NewBufferedAppenderGeneric(writer common.DataWriterGeneric, maxPageSize int) *BufferedAppenderGeneric { + return &BufferedAppenderGeneric{ + writer: writer, + maxPageSize: maxPageSize, + records: make([]common.Record, 0), + } +} + +// Append appends the id/object to the writer. Note that the caller is giving up ownership of the two byte arrays backing the slices. +// Copies should be made and passed in if this is a problem. +func (a *BufferedAppenderGeneric) Append(ctx context.Context, id common.ID, i interface{}) error { + bytesWritten, err := a.writer.Write(ctx, id, i) + if err != nil { + return err + } + + if a.currentRecord == nil { + a.currentRecord = &common.Record{ + Start: a.currentOffset, + } + } + a.totalObjects++ + a.currentBytesWritten += bytesWritten + a.currentRecord.ID = id + + if a.currentBytesWritten > a.maxPageSize { + err := a.flush(ctx) + if err != nil { + return err + } + } + + return nil +} + +// Records returns a slice of the current records +func (a *BufferedAppenderGeneric) Records() []common.Record { + return a.records +} + +// Complete flushes all buffers and releases resources +func (a *BufferedAppenderGeneric) Complete(ctx context.Context) error { + err := a.flush(ctx) + if err != nil { + return err + } + + return a.writer.Complete(ctx) +} + +func (a *BufferedAppenderGeneric) flush(ctx context.Context) error { + if a.currentRecord == nil { + return nil + } + + bytesWritten, err := a.writer.CutPage(ctx) + if err != nil { + return err + } + + a.currentOffset += uint64(bytesWritten) + a.currentRecord.Length += uint32(bytesWritten) + + // update index + a.records = append(a.records, *a.currentRecord) + a.currentRecord = nil + a.currentBytesWritten = 0 + + return nil +} diff --git a/tempodb/encoding/common/types.go b/tempodb/encoding/common/types.go index 316a3a55f47..4ec29f23417 100644 --- a/tempodb/encoding/common/types.go +++ b/tempodb/encoding/common/types.go @@ -36,7 +36,7 @@ type ObjectCombiner interface { // DataReader is the primary abstraction point for supporting multiple data // formats. type DataReader interface { - Read(context.Context, []Record, []byte) ([][]byte, []byte, error) + Read(context.Context, []Record, [][]byte, []byte) ([][]byte, []byte, error) Close() // NextPage can be used to iterate at a page at a time.
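The condensed call pattern for the appender above, as a sketch: w is any common.DataWriterGeneric implementation, and the entry type is a hypothetical stand-in for whatever the caller buffers.

type entry struct {
    ID   common.ID
    Data interface{}
}

func buildPages(ctx context.Context, w common.DataWriterGeneric, entries []entry) ([]common.Record, error) {
    a := encoding.NewBufferedAppenderGeneric(w, 2*1024*1024) // cut pages near 2 MiB
    for _, e := range entries {
        // Append buffers objects; once maxPageSize bytes accumulate, a page
        // is cut and its offset/length recorded internally.
        if err := a.Append(ctx, e.ID, e.Data); err != nil {
            return nil, err
        }
    }
    if err := a.Complete(ctx); err != nil { // flushes the final partial page
        return nil, err
    }
    return a.Records(), nil // one common.Record per cut page, ready for an index writer
}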
May return ErrUnsupported for older formats @@ -67,6 +67,20 @@ type DataWriter interface { Complete() error } +// DataWriterGeneric writes objects instead of byte slices +type DataWriterGeneric interface { + + // Write writes the passed ID/obj to the current page + Write(context.Context, ID, interface{}) (int, error) + + // CutPage completes the current page and starts a new one. It + // returns the length in bytes of the cut page. + CutPage(context.Context) (int, error) + + // Complete must be called when the DataWriter operation is done. + Complete(context.Context) error +} + // IndexWriter is used to write paged indexes type IndexWriter interface { // Write returns a byte representation of the provided Records diff --git a/tempodb/encoding/finder_paged.go b/tempodb/encoding/finder_paged.go index cd32a3353a3..9c00d4f41f5 100644 --- a/tempodb/encoding/finder_paged.go +++ b/tempodb/encoding/finder_paged.go @@ -77,7 +77,7 @@ func (f *pagedFinder) Find(ctx context.Context, id common.ID) ([]byte, error) { } func (f *pagedFinder) findOne(ctx context.Context, id common.ID, record common.Record) ([]byte, error) { - pages, _, err := f.r.Read(ctx, []common.Record{record}, nil) + pages, _, err := f.r.Read(ctx, []common.Record{record}, nil, nil) if err != nil { return nil, err } diff --git a/tempodb/encoding/iterator_paged.go b/tempodb/encoding/iterator_paged.go index 06ddbf5d5fb..3deb11e7bde 100644 --- a/tempodb/encoding/iterator_paged.go +++ b/tempodb/encoding/iterator_paged.go @@ -88,7 +88,7 @@ func (i *pagedIterator) Next(ctx context.Context) (common.ID, []byte, error) { } } - i.pages, i.buffer, err = i.dataReader.Read(ctx, records, i.buffer) + i.pages, i.buffer, err = i.dataReader.Read(ctx, records, i.pages, i.buffer) if err != nil { return nil, nil, fmt.Errorf("error reading objects for records, blockID: %s, err: %w", i.meta.BlockID.String(), err) } diff --git a/tempodb/encoding/iterator_record.go b/tempodb/encoding/iterator_record.go index 88043dc81f5..8f1953d7cc5 100644 --- a/tempodb/encoding/iterator_record.go +++ b/tempodb/encoding/iterator_record.go @@ -45,7 +45,7 @@ func (i *recordIterator) Next(ctx context.Context) (common.ID, []byte, error) { if len(i.records) > 0 { var pages [][]byte var err error - pages, i.buffer, err = i.dataR.Read(ctx, i.records[:1], i.buffer) + pages, i.buffer, err = i.dataR.Read(ctx, i.records[:1], pages, i.buffer) if err != nil { return nil, nil, err } diff --git a/tempodb/encoding/v0/data_reader.go b/tempodb/encoding/v0/data_reader.go index cf4eb967c0e..258f53f13ec 100644 --- a/tempodb/encoding/v0/data_reader.go +++ b/tempodb/encoding/v0/data_reader.go @@ -27,7 +27,7 @@ func NewDataReader(r backend.ContextReader) common.DataReader { // Read returns the pages requested in the passed records.
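A pattern that recurs through these Read implementations: the output buffers are now threaded through as arguments and reused, growing only when capacity falls short, so steady-state iteration stops allocating. The idiom in isolation (a sketch; resize is an illustrative name, not a function in this diff):

// resize grows buf only when capacity is insufficient, then reslices it to
// exactly n bytes so the caller can reuse it across calls.
func resize(buf []byte, n int) []byte {
    if cap(buf) < n {
        buf = make([]byte, n)
    }
    return buf[:n]
}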
It // assumes that if there are multiple records they are ordered // and contiguous -func (r *dataReader) Read(ctx context.Context, records []common.Record, buffer []byte) ([][]byte, []byte, error) { +func (r *dataReader) Read(ctx context.Context, records []common.Record, pagesBuffer [][]byte, buffer []byte) ([][]byte, []byte, error) { if len(records) == 0 { return nil, buffer, nil } @@ -38,16 +38,26 @@ func (r *dataReader) Read(ctx context.Context, records []common.Record, buffer [ length += record.Length } - buffer = make([]byte, length) + // Reset/resize buffer + if cap(buffer) < int(length) { + buffer = make([]byte, length) + } + buffer = buffer[:length] + _, err := r.r.ReadAt(ctx, buffer, int64(start)) if err != nil { return nil, nil, err } - slicePages := make([][]byte, 0, len(records)) + //Reset/resize buffer + if cap(pagesBuffer) < len(records) { + pagesBuffer = make([][]byte, 0, len(records)) + } + pagesBuffer = pagesBuffer[:len(records)] + cursor := uint32(0) previousEnd := uint64(0) - for _, record := range records { + for i, record := range records { end := cursor + record.Length if end > uint32(len(buffer)) { return nil, nil, fmt.Errorf("record out of bounds while reading pages: %d, %d, %d, %d", cursor, record.Length, end, len(buffer)) @@ -57,12 +67,12 @@ func (r *dataReader) Read(ctx context.Context, records []common.Record, buffer [ return nil, nil, fmt.Errorf("non-contiguous pages requested from dataReader: %d, %+v", previousEnd, record) } - slicePages = append(slicePages, buffer[cursor:end]) + pagesBuffer[i] = buffer[cursor:end] cursor += record.Length previousEnd = record.Start + uint64(record.Length) } - return slicePages, buffer, nil + return pagesBuffer, buffer, nil } // Close implements common.DataReader diff --git a/tempodb/encoding/v0/data_reader_test.go b/tempodb/encoding/v0/data_reader_test.go index d56229fd312..df78f0ddbac 100644 --- a/tempodb/encoding/v0/data_reader_test.go +++ b/tempodb/encoding/v0/data_reader_test.go @@ -126,7 +126,7 @@ func TestDataReader(t *testing.T) { for _, tc := range tests { reader := NewDataReader(backend.NewContextReaderWithAllReader(bytes.NewReader(tc.readerBytes))) - actual, _, err := reader.Read(context.Background(), tc.records, nil) + actual, _, err := reader.Read(context.Background(), tc.records, nil, nil) reader.Close() if tc.expectedError { diff --git a/tempodb/encoding/v1/data_reader.go b/tempodb/encoding/v1/data_reader.go index 6f69eb56710..59b7cb52146 100644 --- a/tempodb/encoding/v1/data_reader.go +++ b/tempodb/encoding/v1/data_reader.go @@ -42,34 +42,36 @@ func NewNestedDataReader(r common.DataReader, encoding backend.Encoding) (common // Read returns the pages requested in the passed records. It // assumes that if there are multiple records they are ordered // and contiguous -func (r *dataReader) Read(ctx context.Context, records []common.Record, buffer []byte) ([][]byte, []byte, error) { - compressedPages, buffer, err := r.dataReader.Read(ctx, records, buffer) +func (r *dataReader) Read(ctx context.Context, records []common.Record, pagesBuffer [][]byte, buffer []byte) ([][]byte, []byte, error) { + var err error + r.compressedPagesBuffer, buffer, err = r.dataReader.Read(ctx, records, r.compressedPagesBuffer, buffer) if err != nil { return nil, nil, err } - if cap(r.compressedPagesBuffer) < len(compressedPages) { - // extend r.compressedPagesBuffer - diff := len(compressedPages) - cap(r.compressedPagesBuffer) - r.compressedPagesBuffer = append(r.compressedPagesBuffer[:cap(r.compressedPagesBuffer)], make([][]byte, diff)...) 
- } else { - r.compressedPagesBuffer = r.compressedPagesBuffer[:len(compressedPages)] + // Reset/resize buffer + if cap(pagesBuffer) < len(r.compressedPagesBuffer) { + pagesBuffer = make([][]byte, len(r.compressedPagesBuffer)) } + pagesBuffer = pagesBuffer[:len(r.compressedPagesBuffer)] // now decompress - for i, page := range compressedPages { + for i, page := range r.compressedPagesBuffer { reader, err := r.getCompressedReader(page) if err != nil { return nil, nil, err } - r.compressedPagesBuffer[i], err = tempo_io.ReadAllWithBuffer(reader, len(page), r.compressedPagesBuffer[i]) + pagesBuffer[i], err = tempo_io.ReadAllWithBuffer(reader, len(page), pagesBuffer[i]) if err != nil { return nil, nil, err } + // TODO mdisibio - There is a lot of performance penalty here even with no compression. + // Investigate further. + //pagesBuffer[i] = page } - return r.compressedPagesBuffer, buffer, nil + return pagesBuffer, buffer, nil } func (r *dataReader) Close() { diff --git a/tempodb/encoding/v1/data_reader_test.go b/tempodb/encoding/v1/data_reader_test.go index ff63560049c..943d2aa6621 100644 --- a/tempodb/encoding/v1/data_reader_test.go +++ b/tempodb/encoding/v1/data_reader_test.go @@ -48,7 +48,7 @@ func TestAllEncodings(t *testing.T) { Start: 0, Length: uint32(mw.bytesWritten), }, - }, nil) + }, nil, nil) assert.NoError(t, err) assert.Len(t, actual, 1) diff --git a/tempodb/encoding/v2/data_reader.go b/tempodb/encoding/v2/data_reader.go index 6f15a5a66c7..cb4a37441fd 100644 --- a/tempodb/encoding/v2/data_reader.go +++ b/tempodb/encoding/v2/data_reader.go @@ -38,23 +38,22 @@ func NewDataReader(r backend.ContextReader, encoding backend.Encoding) (common.D } // Read implements common.DataReader -func (r *dataReader) Read(ctx context.Context, records []common.Record, buffer []byte) ([][]byte, []byte, error) { - v0Pages, buffer, err := r.dataReader.Read(ctx, records, buffer) +func (r *dataReader) Read(ctx context.Context, records []common.Record, pagesBuffer [][]byte, buffer []byte) ([][]byte, []byte, error) { + pagesBuffer, buffer, err := r.dataReader.Read(ctx, records, pagesBuffer, buffer) if err != nil { return nil, nil, err } - pages := make([][]byte, 0, len(v0Pages)) - for _, v0Page := range v0Pages { - page, err := unmarshalPageFromBytes(v0Page, constDataHeader) + for i := range pagesBuffer { + p, err := unmarshalPageFromBytes(pagesBuffer[i], constDataHeader) if err != nil { return nil, nil, err } - pages = append(pages, page.data) + pagesBuffer[i] = p.data } - return pages, buffer, nil + return pagesBuffer, buffer, nil } func (r *dataReader) Close() { diff --git a/tempodb/encoding/versioned_test.go b/tempodb/encoding/versioned_test.go index 355c090b2db..c414178ec98 100644 --- a/tempodb/encoding/versioned_test.go +++ b/tempodb/encoding/versioned_test.go @@ -66,7 +66,7 @@ func testDataWriterReader(t *testing.T, v VersionedEncoding, e backend.Encoding) Start: 0, Length: uint32(bytesWritten), }, - }, nil) + }, nil, nil) require.NoError(t, err) require.Len(t, actual, 1) diff --git a/tempodb/search/backend_search_block.go b/tempodb/search/backend_search_block.go new file mode 100644 index 00000000000..b9bbfbc99f1 --- /dev/null +++ b/tempodb/search/backend_search_block.go @@ -0,0 +1,209 @@ +package search + +import ( + "bytes" + "context" + + "github.com/google/uuid" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/backend/local" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" 
+) + +var _ SearchableBlock = (*BackendSearchBlock)(nil) + +const defaultBackendSearchBlockPageSize = 2 * 1024 * 1024 + +type BackendSearchBlock struct { + id uuid.UUID + tenantID string + l *local.Backend +} + +// NewBackendSearchBlock iterates through the given WAL search data and writes it to the persistent backend +// in a more efficient paged form. Multiple traces are written in the same page to make use of the flatbuffer +// CreateSharedString feature, which dedupes strings across the entire buffer. +func NewBackendSearchBlock(input *StreamingSearchBlock, l *local.Backend, blockID uuid.UUID, tenantID string, enc backend.Encoding, pageSizeBytes int) error { + var err error + ctx := context.TODO() + indexPageSize := 100 * 1024 + kv := &tempofb.KeyValues{} // buffer + + // Pinning a specific version instead of latest for safety + version, err := encoding.FromVersion("v2") + if err != nil { + return err + } + + if pageSizeBytes <= 0 { + pageSizeBytes = defaultBackendSearchBlockPageSize + } + + // Copy records into the appender + w, err := newBackendSearchBlockWriter(blockID, tenantID, l, version, enc) + if err != nil { + return err + } + + a := encoding.NewBufferedAppenderGeneric(w, pageSizeBytes) + for _, r := range input.appender.Records() { + + // Read + buf := make([]byte, r.Length) + _, err = input.file.ReadAt(buf, int64(r.Start)) + if err != nil { + return err + } + + s := tempofb.SearchEntryFromBytes(buf) + data := &tempofb.SearchEntryMutable{ + TraceID: r.ID, + StartTimeUnixNano: s.StartTimeUnixNano(), + EndTimeUnixNano: s.EndTimeUnixNano(), + } + + l := s.TagsLength() + for i := 0; i < l; i++ { + s.Tags(kv, i) + for j := 0; j < kv.ValueLength(); j++ { + data.AddTag(string(kv.Key()), string(kv.Value(j))) + } + } + + err = a.Append(ctx, r.ID, data) + if err != nil { + return err + } + } + + err = a.Complete(ctx) + if err != nil { + return err + } + + // Write index + ir := a.Records() + i := version.NewIndexWriter(indexPageSize) + indexBytes, err := i.Write(ir) + if err != nil { + return err + } + err = l.Write(ctx, "search-index", backend.KeyPathForBlock(blockID, tenantID), bytes.NewReader(indexBytes), int64(len(indexBytes)), true) + if err != nil { + return err + } + + // Write meta + sm := &BlockMeta{ + IndexPageSize: uint32(indexPageSize), + IndexRecords: uint32(len(ir)), + Version: version.Version(), + Encoding: enc, + } + return WriteSearchBlockMeta(ctx, l, blockID, tenantID, sm) +} + +// OpenBackendSearchBlock opens the search data for an existing block in the given backend. +func OpenBackendSearchBlock(l *local.Backend, blockID uuid.UUID, tenantID string) *BackendSearchBlock { + return &BackendSearchBlock{ + id: blockID, + tenantID: tenantID, + l: l, + } +} + +// Search iterates through the block looking for matches.
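For orientation, the block written above consists of three objects under the block's key path (a summary of the surrounding code, not a new format):

search.meta.json: BlockMeta (version, encoding, index page size, index record count)
search-index: the paged index, one common.Record (ID, Start, Length) per data page
search: the data pages themselves, each an encoded flatbuffer SearchPage

The Search method that follows walks the index one record at a time, reads the matching page, and runs the pipeline against it.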
+func (s *BackendSearchBlock) Search(ctx context.Context, p Pipeline, sr *Results) error { + var pageBuf []byte + var dataBuf []byte + var pagesBuf [][]byte + indexBuf := []common.Record{{}} + entry := &tempofb.SearchEntry{} // Buffer + + sr.AddBlockInspected() + + meta, err := ReadSearchBlockMeta(ctx, s.l, s.id, s.tenantID) + if err != nil { + return err + } + + vers, err := encoding.FromVersion(meta.Version) + if err != nil { + return err + } + + // Read index + bmeta := backend.NewBlockMeta(s.tenantID, s.id, meta.Version, meta.Encoding, "") + cr := backend.NewContextReader(bmeta, "search-index", backend.NewReader(s.l), false) + + ir, err := vers.NewIndexReader(cr, int(meta.IndexPageSize), int(meta.IndexRecords)) + if err != nil { + return err + } + + dcr := backend.NewContextReader(bmeta, "search", backend.NewReader(s.l), false) + dr, err := vers.NewDataReader(dcr, meta.Encoding) + if err != nil { + return err + } + + or := vers.NewObjectReaderWriter() + + i := -1 + + for !sr.Quit() { + + i++ + + // Next index entry + record, _ := ir.At(ctx, i) + if record == nil { + return nil + } + + indexBuf[0] = *record + pagesBuf, pageBuf, err = dr.Read(ctx, indexBuf, pagesBuf, pageBuf) + if err != nil { + return err + } + + pagesBuf[0], _, dataBuf, err = or.UnmarshalAndAdvanceBuffer(pagesBuf[0]) + if err != nil { + return err + } + + sr.AddBytesInspected(uint64(len(dataBuf))) + + batch := tempofb.GetRootAsSearchPage(dataBuf, 0) + + // Verify something in the batch matches + if !p.MatchesTags(batch) { + // Increment metric still + sr.AddTraceInspected(uint32(batch.EntriesLength())) + continue + } + + l := batch.EntriesLength() + for j := 0; j < l; j++ { + sr.AddTraceInspected(1) + + batch.Entries(entry, j) + + if !p.Matches(entry) { + continue + } + + // If we got here then it's a match. 
+ match := GetSearchResultFromData(entry) + + if quit := sr.AddResult(ctx, match); quit { + return nil + } + } + } + + return nil +} diff --git a/tempodb/search/backend_search_block_test.go b/tempodb/search/backend_search_block_test.go new file mode 100644 index 00000000000..f5b97db73fe --- /dev/null +++ b/tempodb/search/backend_search_block_test.go @@ -0,0 +1,127 @@ +package search + +import ( + "context" + "fmt" + "os" + "path" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/backend/local" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newBackendSearchBlockWithTraces(t testing.TB, traceCount int, enc backend.Encoding, pageSizeBytes int) *BackendSearchBlock { + id := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} // 16-byte ids required + searchData := [][]byte{(&tempofb.SearchEntryMutable{ + Tags: tempofb.SearchDataMap{ + "key1": {"value10", "value11"}, + "key2": {"value20", "value21"}, + "key3": {"value30", "value31"}, + "key4": {"value40", "value41"}, + }}).ToBytes()} + + f, err := os.OpenFile(path.Join(t.TempDir(), "searchdata"), os.O_CREATE|os.O_RDWR, 0644) + require.NoError(t, err) + + b1, err := NewStreamingSearchBlockForFile(f) + require.NoError(t, err) + for i := 0; i < traceCount; i++ { + assert.NoError(t, b1.Append(context.Background(), id, searchData)) + } + + l, err := local.NewBackend(&local.Config{ + Path: t.TempDir(), + }) + require.NoError(t, err) + + blockID := uuid.New() + tenantID := "fake" + err = NewBackendSearchBlock(b1, l, blockID, tenantID, enc, pageSizeBytes) + require.NoError(t, err) + + b2 := OpenBackendSearchBlock(l, blockID, tenantID) + return b2 +} + +func TestBackendSearchBlockSearch(t *testing.T) { + traceCount := 50_000 + + b2 := newBackendSearchBlockWithTraces(t, traceCount, backend.EncNone, 0) + + // Matches every trace + p := NewSearchPipeline(&tempopb.SearchRequest{ + Tags: map[string]string{"key1": "value10"}, + }) + + sr := NewResults() + + sr.StartWorker() + go func() { + defer sr.FinishWorker() + err := b2.Search(context.TODO(), p, sr) + require.NoError(t, err) + }() + sr.AllWorkersStarted() + + var results []*tempopb.TraceSearchMetadata + for r := range sr.Results() { + results = append(results, r) + } + require.Equal(t, traceCount, len(results)) + require.Equal(t, traceCount, int(sr.TracesInspected())) +} + +func BenchmarkBackendSearchBlockSearch(b *testing.B) { + pageSizesMB := []float32{0.5, 1, 2} + + for _, enc := range backend.SupportedEncoding { + for _, sz := range pageSizesMB { + b.Run(fmt.Sprint(enc.String(), "/", sz, "MiB"), func(b *testing.B) { + + b2 := newBackendSearchBlockWithTraces(b, b.N, enc, int(sz*1024*1024)) + + // Matches nothing, will perform an exhaustive search. + p := NewSearchPipeline(&tempopb.SearchRequest{ + Tags: map[string]string{"nomatch": "nomatch"}, + }) + + sr := NewResults() + + b.ResetTimer() + start := time.Now() + // Search 10x10 because reading the search data is much faster than creating it, but we need + // to spend at least 1 second to satisfy go bench minimum elapsed time requirement. 
+ loops := 10 + wg := &sync.WaitGroup{} + for i := 0; i < loops; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < loops; j++ { + err := b2.Search(context.TODO(), p, sr) + require.NoError(b, err) + } + }() + } + wg.Wait() + elapsed := time.Since(start) + + fmt.Printf("BackendSearchBlock search throughput: %v elapsed %.2f MiB = %.2f MiB/s \t %d traces = %.2fM traces/s \n", + elapsed, + float64(sr.bytesInspected.Load())/(1024*1024), + float64(sr.bytesInspected.Load())/(elapsed.Seconds())/(1024*1024), + sr.TracesInspected(), + float64(sr.TracesInspected())/(elapsed.Seconds())/1_000_000, + ) + }) + } + } +} diff --git a/tempodb/search/backend_search_block_writer.go b/tempodb/search/backend_search_block_writer.go new file mode 100644 index 00000000000..6e2183360fb --- /dev/null +++ b/tempodb/search/backend_search_block_writer.go @@ -0,0 +1,101 @@ +package search + +import ( + "bytes" + "context" + + "github.com/google/uuid" + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" +) + +// backendSearchBlockWriter is a DataWriter for search data. Instead of receiving byte slices, it +// receives search data objects and combines them into a single FlatBuffer Builder and +// flushes periodically, one page per flush. +type backendSearchBlockWriter struct { + // input + blockID uuid.UUID + tenantID string + w backend.RawWriter + + // vars + builder *tempofb.SearchPageBuilder + pageBuf []byte + tracker backend.AppendTracker + finalBuf *bytes.Buffer + dw common.DataWriter +} + +var _ common.DataWriterGeneric = (*backendSearchBlockWriter)(nil) + +func newBackendSearchBlockWriter(blockID uuid.UUID, tenantID string, w backend.RawWriter, v encoding.VersionedEncoding, enc backend.Encoding) (*backendSearchBlockWriter, error) { + finalBuf := &bytes.Buffer{} + + dw, err := v.NewDataWriter(finalBuf, enc) + if err != nil { + return nil, err + } + + return &backendSearchBlockWriter{ + blockID: blockID, + tenantID: tenantID, + w: w, + + pageBuf: make([]byte, 0, 1024*1024), + finalBuf: finalBuf, + builder: tempofb.NewSearchPageBuilder(), + dw: dw, + }, nil +} + +// Write the data to the flatbuffer builder. Input must be a SearchEntryMutable. Returns +// the number of bytes written, which is determined from the current object in the builder.
+func (w *backendSearchBlockWriter) Write(ctx context.Context, ID common.ID, i interface{}) (int, error) { + data := i.(*tempofb.SearchEntryMutable) + bytesWritten := w.builder.AddData(data) + return bytesWritten, nil +} + +func (w *backendSearchBlockWriter) CutPage(ctx context.Context) (int, error) { + + // Finish fb page + buf := w.builder.Finish() + + // Write to data writer and cut which will encode/compress + w.finalBuf.Reset() + _, err := w.dw.Write(uuid.Nil[:], buf) + if err != nil { + return 0, err + } + + _, err = w.dw.CutPage() + if err != nil { + return 0, err + } + + w.pageBuf = w.finalBuf.Bytes() + + // Append to backend + w.tracker, err = w.w.Append(ctx, "search", backend.KeyPathForBlock(w.blockID, w.tenantID), w.tracker, w.pageBuf) + if err != nil { + return 0, err + } + + bytesFlushed := len(w.pageBuf) + + // Reset for next page + w.builder.Reset() + + return bytesFlushed, nil +} + +func (w *backendSearchBlockWriter) Complete(ctx context.Context) error { + err := w.dw.Complete() + if err != nil { + return err + } + + return w.w.CloseAppend(ctx, w.tracker) +} diff --git a/tempodb/search/block_meta.go b/tempodb/search/block_meta.go new file mode 100644 index 00000000000..e89db64a12a --- /dev/null +++ b/tempodb/search/block_meta.go @@ -0,0 +1,51 @@ +package search + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/google/uuid" + tempo_io "github.com/grafana/tempo/pkg/io" + "github.com/grafana/tempo/tempodb/backend" +) + +type BlockMeta struct { + Version string `json:"version"` + Encoding backend.Encoding `json:"encoding"` // Encoding/compression format + IndexPageSize uint32 `json:"indexPageSize"` + IndexRecords uint32 `json:"indexRecords"` +} + +const searchMetaObjectName = "search.meta.json" + +func WriteSearchBlockMeta(ctx context.Context, w backend.RawWriter, blockID uuid.UUID, tenantID string, sm *BlockMeta) error { + metaBytes, err := json.Marshal(sm) + if err != nil { + return err + } + + err = w.Write(ctx, searchMetaObjectName, backend.KeyPathForBlock(blockID, tenantID), bytes.NewReader(metaBytes), int64(len(metaBytes)), false) + return err +} + +func ReadSearchBlockMeta(ctx context.Context, r backend.RawReader, blockID uuid.UUID, tenantID string) (*BlockMeta, error) { + metaReader, size, err := r.Read(ctx, searchMetaObjectName, backend.KeyPathForBlock(blockID, tenantID), false) + if err != nil { + return nil, err + } + + defer metaReader.Close() + metaBytes, err := tempo_io.ReadAllWithEstimate(metaReader, size) + if err != nil { + return nil, err + } + + meta := &BlockMeta{} + err = json.Unmarshal(metaBytes, meta) + if err != nil { + return nil, err + } + + return meta, nil +} diff --git a/tempodb/search/pipeline.go b/tempodb/search/pipeline.go new file mode 100644 index 00000000000..ff1a9aa66b7 --- /dev/null +++ b/tempodb/search/pipeline.go @@ -0,0 +1,89 @@ +package search + +import ( + "strings" + "time" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" +) + +type tracefilter func(header *tempofb.SearchEntry) (matches bool) +type tagfilter func(c tempofb.TagContainer) (matches bool) + +type Pipeline struct { + tagfilters []tagfilter + tracefilters []tracefilter +} + +func NewSearchPipeline(req *tempopb.SearchRequest) Pipeline { + p := Pipeline{} + + if req.MinDurationMs > 0 { + minDuration := req.MinDurationMs * uint32(time.Millisecond) + p.tracefilters = append(p.tracefilters, func(s *tempofb.SearchEntry) bool { + return (s.EndTimeUnixNano()-s.StartTimeUnixNano())*uint64(time.Nanosecond) >= uint64(minDuration) + 
}) + } + + if req.MaxDurationMs > 0 { + maxDuration := req.MaxDurationMs * uint32(time.Millisecond) + p.tracefilters = append(p.tracefilters, func(s *tempofb.SearchEntry) bool { + return (s.EndTimeUnixNano()-s.StartTimeUnixNano())*uint64(time.Nanosecond) <= uint64(maxDuration) + }) + } + + if len(req.Tags) > 0 { + // Convert all search params to bytes once + kb := make([][]byte, 0, len(req.Tags)) + vb := make([][]byte, 0, len(req.Tags)) + + for k, v := range req.Tags { + kb = append(kb, []byte(strings.ToLower(k))) + vb = append(vb, []byte(strings.ToLower(v))) + } + + p.tagfilters = append(p.tagfilters, func(s tempofb.TagContainer) bool { + // Buffer is allocated here so function is thread-safe + buffer := &tempofb.KeyValues{} + + // Must match all + for i := range kb { + if !tempofb.ContainsTag(s, buffer, kb[i], vb[i]) { + return false + } + } + return true + }) + } + + return p +} + +func (p *Pipeline) Matches(header *tempofb.SearchEntry) bool { + + for _, f := range p.tracefilters { + if !f(header) { + return false + } + } + + for _, f := range p.tagfilters { + if !f(header) { + return false + } + } + + return true +} + +func (p *Pipeline) MatchesTags(c tempofb.TagContainer) bool { + + for _, f := range p.tagfilters { + if !f(c) { + return false + } + } + + return true +} diff --git a/tempodb/search/pipeline_test.go b/tempodb/search/pipeline_test.go new file mode 100644 index 00000000000..685b1e70b0d --- /dev/null +++ b/tempodb/search/pipeline_test.go @@ -0,0 +1,122 @@ +package search + +import ( + "testing" + "time" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/stretchr/testify/require" +) + +func TestPipelineMatchesTags(t *testing.T) { + + testCases := []struct { + name string + request map[string]string + searchData tempofb.SearchDataMap + shouldMatch bool + }{ + { + name: "match", + searchData: tempofb.SearchDataMap{"key": {"value"}}, + request: map[string]string{"key": "value"}, + shouldMatch: true, + }, + { + name: "noMatch", + searchData: tempofb.SearchDataMap{"key1": {"value"}}, + request: map[string]string{"key2": "value"}, + shouldMatch: false, + }, + { + name: "matchSubstring", + searchData: tempofb.SearchDataMap{"key": {"avalue"}}, + request: map[string]string{"key": "val"}, + shouldMatch: true, + }, + { + name: "matchMulti", + searchData: tempofb.SearchDataMap{"key1": {"value1"}, "key2": {"value2"}, "key3": {"value3"}, "key4": {"value4"}}, + request: map[string]string{"key1": "value1", "key3": "value3"}, + shouldMatch: true, + }, + { + name: "noMatchMulti", + searchData: tempofb.SearchDataMap{"key1": {"value1"}, "key2": {"value2"}}, + request: map[string]string{"key1": "value1", "key3": "value3"}, + shouldMatch: false, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + p := NewSearchPipeline(&tempopb.SearchRequest{Tags: tc.request}) + data := tempofb.SearchEntryMutable{ + Tags: tc.searchData, + } + sd := tempofb.SearchEntryFromBytes(data.ToBytes()) + matches := p.Matches(sd) + + require.Equal(t, tc.shouldMatch, matches) + }) + } +} + +func BenchmarkPipelineMatches(b *testing.B) { + + entry := tempofb.SearchEntryFromBytes((&tempofb.SearchEntryMutable{ + StartTimeUnixNano: 0, + EndTimeUnixNano: uint64(500 * time.Millisecond / time.Nanosecond), //500ms in nanoseconds + Tags: tempofb.SearchDataMap{ + "key1": {"value10", "value11"}, + "key2": {"value20", "value21"}, + "key3": {"value30", "value31"}, + "key4": {"value40", "value41"}, + }}).ToBytes()) + + testCases := []struct { + name string + req 
diff --git a/tempodb/search/pipeline_test.go b/tempodb/search/pipeline_test.go
new file mode 100644
index 00000000000..685b1e70b0d
--- /dev/null
+++ b/tempodb/search/pipeline_test.go
@@ -0,0 +1,122 @@
+package search
+
+import (
+ "testing"
+ "time"
+
+ "github.com/grafana/tempo/pkg/tempofb"
+ "github.com/grafana/tempo/pkg/tempopb"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPipelineMatchesTags(t *testing.T) {
+
+ testCases := []struct {
+ name string
+ request map[string]string
+ searchData tempofb.SearchDataMap
+ shouldMatch bool
+ }{
+ {
+ name: "match",
+ searchData: tempofb.SearchDataMap{"key": {"value"}},
+ request: map[string]string{"key": "value"},
+ shouldMatch: true,
+ },
+ {
+ name: "noMatch",
+ searchData: tempofb.SearchDataMap{"key1": {"value"}},
+ request: map[string]string{"key2": "value"},
+ shouldMatch: false,
+ },
+ {
+ name: "matchSubstring",
+ searchData: tempofb.SearchDataMap{"key": {"avalue"}},
+ request: map[string]string{"key": "val"},
+ shouldMatch: true,
+ },
+ {
+ name: "matchMulti",
+ searchData: tempofb.SearchDataMap{"key1": {"value1"}, "key2": {"value2"}, "key3": {"value3"}, "key4": {"value4"}},
+ request: map[string]string{"key1": "value1", "key3": "value3"},
+ shouldMatch: true,
+ },
+ {
+ name: "noMatchMulti",
+ searchData: tempofb.SearchDataMap{"key1": {"value1"}, "key2": {"value2"}},
+ request: map[string]string{"key1": "value1", "key3": "value3"},
+ shouldMatch: false,
+ }}
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ p := NewSearchPipeline(&tempopb.SearchRequest{Tags: tc.request})
+ data := tempofb.SearchEntryMutable{
+ Tags: tc.searchData,
+ }
+ sd := tempofb.SearchEntryFromBytes(data.ToBytes())
+ matches := p.Matches(sd)
+
+ require.Equal(t, tc.shouldMatch, matches)
+ })
+ }
+}
+
+func BenchmarkPipelineMatches(b *testing.B) {
+
+ entry := tempofb.SearchEntryFromBytes((&tempofb.SearchEntryMutable{
+ StartTimeUnixNano: 0,
+ EndTimeUnixNano: uint64(500 * time.Millisecond / time.Nanosecond), // 500ms in nanoseconds
+ Tags: tempofb.SearchDataMap{
+ "key1": {"value10", "value11"},
+ "key2": {"value20", "value21"},
+ "key3": {"value30", "value31"},
+ "key4": {"value40", "value41"},
+ }}).ToBytes())
+
+ testCases := []struct {
+ name string
+ req *tempopb.SearchRequest
+ }{
+ {
+ "match_tag",
+ &tempopb.SearchRequest{
+ Tags: map[string]string{
+ "key2": "value21",
+ },
+ },
+ },
+ {
+ "nomatch_tag_minDuration",
+ &tempopb.SearchRequest{
+ MinDurationMs: 501,
+ Tags: map[string]string{
+ "key5": "nomatch",
+ },
+ },
+ },
+ {
+ "nomatch_minDuration",
+ &tempopb.SearchRequest{
+ MinDurationMs: 501,
+ },
+ },
+ {
+ "match_minDuration",
+ &tempopb.SearchRequest{
+ MinDurationMs: 499,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ b.Run(tc.name, func(b *testing.B) {
+ pipeline := NewSearchPipeline(tc.req)
+
+ for i := 0; i < b.N; i++ {
+ pipeline.Matches(entry)
+ }
+ })
+ }
+}
diff --git a/tempodb/search/results.go b/tempodb/search/results.go
new file mode 100644
index 00000000000..bc7a2c19688
--- /dev/null
+++ b/tempodb/search/results.go
@@ -0,0 +1,127 @@
+package search
+
+import (
+ "context"
+
+ "github.com/grafana/tempo/pkg/tempopb"
+ "go.uber.org/atomic"
+)
+
+// Results eases performing a highly parallel search by funneling all results into a single
+// channel that is easy to consume, signaling workers to quit early as needed, and collecting
+// metrics.
+type Results struct {
+ resultsCh chan *tempopb.TraceSearchMetadata
+ doneCh chan struct{}
+ quit atomic.Bool
+ started atomic.Bool
+ workerCount atomic.Int32
+
+ tracesInspected atomic.Uint32
+ bytesInspected atomic.Uint64
+ blocksInspected atomic.Uint32
+}
+
+func NewResults() *Results {
+ return &Results{
+ resultsCh: make(chan *tempopb.TraceSearchMetadata),
+ doneCh: make(chan struct{}),
+ }
+}
+
+// AddResult sends a search result from a search task (goroutine) to the receiver of the
+// search results, i.e. the initiator of the search. It blocks until there is buffer space
+// in the results channel, or until the task should stop searching because the receiver
+// went away or the given context is done; in those cases it returns true.
+func (sr *Results) AddResult(ctx context.Context, r *tempopb.TraceSearchMetadata) (quit bool) {
+ select {
+ case sr.resultsCh <- r:
+ return false
+ case <-ctx.Done():
+ return true
+ case <-sr.doneCh:
+ // This returns immediately once the done channel is closed.
+ return true
+ }
+}
+
+// Quit returns whether search tasks should quit early. This can occur when max results
+// are already found, or on other errors such as timeouts.
+func (sr *Results) Quit() bool {
+ return sr.quit.Load()
+}
+
+// Results returns the results channel. The channel is closed when the search is complete.
+// It can be iterated with range:
+// for res := range sr.Results()
+func (sr *Results) Results() <-chan *tempopb.TraceSearchMetadata {
+ return sr.resultsCh
+}
+
+// Close signals all workers to quit, e.g. when max results have been received and no more
+// work is needed. Called by the initiator of the search in a defer statement like:
+// sr := NewResults()
+// defer sr.Close()
+func (sr *Results) Close() {
+ // Closing the done channel makes all subsequent and blocked calls to AddResult return
+ // quit immediately.
+ close(sr.doneCh)
+ sr.quit.Store(true)
+}
+
+// StartWorker indicates another sender will be using the results channel. Must be followed
+// by a call to FinishWorker, which is usually deferred in a goroutine:
+// sr.StartWorker()
+// go func() {
+// defer sr.FinishWorker()
+func (sr *Results) StartWorker() {
+ sr.workerCount.Inc()
+}
+
+// AllWorkersStarted indicates that no more workers (senders) will be launched, and the
+// results channel can be closed once the number of workers reaches zero.
This function +// call occurs after all calls to StartWorker. +func (sr *Results) AllWorkersStarted() { + sr.started.Store(true) + sr.checkCleanup(sr.workerCount.Load()) +} + +// FinishWorker indicates a sender (goroutine) is done searching and will not +// send any more search results. When the last sender is finished, the results +// channel is closed. +func (sr *Results) FinishWorker() { + newCount := sr.workerCount.Dec() + sr.checkCleanup(newCount) +} + +func (sr *Results) checkCleanup(workerCount int32) { + if sr.started.Load() && workerCount == 0 { + // No more senders. This ends the receiver that is iterating + // the results channel. + close(sr.resultsCh) + } +} + +func (sr *Results) TracesInspected() uint32 { + return sr.tracesInspected.Load() +} + +func (sr *Results) AddTraceInspected(c uint32) { + sr.tracesInspected.Add(c) +} + +func (sr *Results) BytesInspected() uint64 { + return sr.bytesInspected.Load() +} + +func (sr *Results) AddBytesInspected(c uint64) { + sr.bytesInspected.Add(c) +} + +func (sr *Results) AddBlockInspected() { + sr.blocksInspected.Inc() +} + +func (sr *Results) BlocksInspected() uint32 { + return sr.blocksInspected.Load() +} diff --git a/tempodb/search/searchable_block.go b/tempodb/search/searchable_block.go new file mode 100644 index 00000000000..d003fafaa94 --- /dev/null +++ b/tempodb/search/searchable_block.go @@ -0,0 +1,9 @@ +package search + +import ( + "context" +) + +type SearchableBlock interface { + Search(ctx context.Context, p Pipeline, sr *Results) error +} diff --git a/tempodb/search/streaming_search_block.go b/tempodb/search/streaming_search_block.go new file mode 100644 index 00000000000..d2179904dd3 --- /dev/null +++ b/tempodb/search/streaming_search_block.go @@ -0,0 +1,137 @@ +package search + +import ( + "context" + "os" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" +) + +var _ SearchableBlock = (*StreamingSearchBlock)(nil) +var _ common.DataWriter = (*StreamingSearchBlock)(nil) + +// StreamingSearchBlock is search data that is read/write, i.e. for traces in the WAL. +type StreamingSearchBlock struct { + appender encoding.Appender + file *os.File + bytesWritten int +} + +// Clear deletes the files for this block. +func (s *StreamingSearchBlock) Clear() error { + s.file.Close() + return os.Remove(s.file.Name()) +} + +func (*StreamingSearchBlock) Complete() error { + return nil +} + +// CutPage returns the number of bytes written previously so that the appender can build the index. +func (s *StreamingSearchBlock) CutPage() (int, error) { + b := s.bytesWritten + s.bytesWritten = 0 + return b, nil +} + +// Write the entry to the end of the file. The number of bytes written is saved and returned through CutPage. +func (s *StreamingSearchBlock) Write(id common.ID, obj []byte) (int, error) { + var err error + + _, err = s.file.Write(obj) + if err != nil { + return 0, err + } + + s.bytesWritten += len(obj) + + return len(obj), err +} + +// NewStreamingSearchBlockForFile creates a new streaming block that will read/write the given file. +// File must be opened for read/write permissions. +func NewStreamingSearchBlockForFile(f *os.File) (*StreamingSearchBlock, error) { + s := &StreamingSearchBlock{ + file: f, + } + + // Entries are not paged, use non paged appender. + a := encoding.NewAppender(s) + s.appender = a + + return s, nil +} + +// Append the given search data to the streaming block. 
Multiple byte buffers of search data for +// the same trace can be passed and are merged into one entry. +func (s *StreamingSearchBlock) Append(ctx context.Context, id common.ID, searchData [][]byte) error { + data := tempofb.SearchEntryMutable{} + + kv := &tempofb.KeyValues{} + + // Squash all datas into 1 + for _, sb := range searchData { + sd := tempofb.SearchEntryFromBytes(sb) + for i := 0; i < sd.TagsLength(); i++ { + sd.Tags(kv, i) + for j := 0; j < kv.ValueLength(); j++ { + data.AddTag(string(kv.Key()), string(kv.Value(j))) + } + } + data.SetStartTimeUnixNano(sd.StartTimeUnixNano()) + data.SetEndTimeUnixNano(sd.EndTimeUnixNano()) + } + data.TraceID = id + + buf := data.ToBytes() + + return s.appender.Append(id, buf) +} + +// Search the streaming block. +func (s *StreamingSearchBlock) Search(ctx context.Context, p Pipeline, sr *Results) error { + + var buf []byte + + sr.AddBlockInspected() + + rr := s.appender.Records() + + for _, r := range rr { + + if sr.Quit() { + return nil + } + + // Reset/resize buffer + if cap(buf) < int(r.Length) { + buf = make([]byte, r.Length) + } + buf = buf[:r.Length] + + _, err := s.file.ReadAt(buf, int64(r.Start)) + if err != nil { + return err + } + + sr.AddBytesInspected(uint64(r.Length)) + sr.AddTraceInspected(1) + + entry := tempofb.SearchEntryFromBytes(buf) + + if !p.Matches(entry) { + continue + } + + // If we got here then it's a match. + match := GetSearchResultFromData(entry) + + if quit := sr.AddResult(ctx, match); quit { + return nil + } + } + + return nil +} diff --git a/tempodb/search/streaming_search_block_test.go b/tempodb/search/streaming_search_block_test.go new file mode 100644 index 00000000000..05350f218fd --- /dev/null +++ b/tempodb/search/streaming_search_block_test.go @@ -0,0 +1,101 @@ +package search + +import ( + "context" + "fmt" + "os" + "path" + "sync" + "testing" + "time" + + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/stretchr/testify/require" +) + +func newStreamingSearchBlockWithTraces(traceCount int, t testing.TB) *StreamingSearchBlock { + id := []byte{1, 2, 3, 4, 5, 6, 7, 8} + searchData := [][]byte{(&tempofb.SearchEntryMutable{ + Tags: tempofb.SearchDataMap{ + "key1": {"value10", "value11"}, + "key2": {"value20", "value21"}, + "key3": {"value30", "value31"}, + "key4": {"value40", "value41"}, + }}).ToBytes()} + + f, err := os.OpenFile(path.Join(t.TempDir(), "searchdata"), os.O_CREATE|os.O_RDWR, 0644) + require.NoError(t, err) + + sb, err := NewStreamingSearchBlockForFile(f) + require.NoError(t, err) + + for i := 0; i < traceCount; i++ { + require.NoError(t, sb.Append(context.Background(), id, searchData)) + } + + return sb +} + +func TestStreamingSearchBlockSearch(t *testing.T) { + traceCount := 10 + + sb := newStreamingSearchBlockWithTraces(traceCount, t) + + // Matches every trace + p := NewSearchPipeline(&tempopb.SearchRequest{ + Tags: map[string]string{"key1": "value10"}, + }) + + sr := NewResults() + + sr.StartWorker() + go func() { + defer sr.FinishWorker() + err := sb.Search(context.TODO(), p, sr) + require.NoError(t, err) + }() + sr.AllWorkersStarted() + + var results []*tempopb.TraceSearchMetadata + for r := range sr.Results() { + results = append(results, r) + } + require.Equal(t, traceCount, len(results)) + require.Equal(t, traceCount, int(sr.TracesInspected())) +} + +func BenchmarkStreamingSearchBlockSearch(b *testing.B) { + + sb := newStreamingSearchBlockWithTraces(b.N, b) + + p := NewSearchPipeline(&tempopb.SearchRequest{ + Tags: 
map[string]string{"nomatch": "nomatch"}, + }) + + sr := NewResults() + + b.ResetTimer() + start := time.Now() + // Search 10x10 because reading the search data is much faster than creating it, but we need + // to spend at least 1 second to satisfy go bench minimum elapsed time requirement. + loops := 10 + wg := &sync.WaitGroup{} + for i := 0; i < loops; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < loops; j++ { + err := sb.Search(context.TODO(), p, sr) + require.NoError(b, err) + } + }() + } + wg.Wait() + elapsed := time.Since(start) + + fmt.Printf("StreamingSearchBlock search throughput: %v elapsed %.2f MB = %.2f MiB/s throughput \n", + elapsed, + float64(sr.bytesInspected.Load())/(1024*1024), + float64(sr.bytesInspected.Load())/(elapsed.Seconds())/(1024*1024)) +} diff --git a/tempodb/search/tag_cache.go b/tempodb/search/tag_cache.go new file mode 100644 index 00000000000..423204c8369 --- /dev/null +++ b/tempodb/search/tag_cache.go @@ -0,0 +1,120 @@ +package search + +import ( + "math" + "sort" + "sync" + "time" + + "github.com/grafana/tempo/pkg/tempofb" +) + +type CacheEntry struct { + values map[string]int64 // value -> unix timestamp +} + +const maxValuesPerTag = 50 + +type TagCache struct { + lookups map[string]*CacheEntry + mtx sync.RWMutex +} + +func NewTagCache() *TagCache { + return &TagCache{ + lookups: map[string]*CacheEntry{}, + } +} + +func (s *TagCache) GetNames() []string { + s.mtx.RLock() + tags := make([]string, 0, len(s.lookups)) + for k := range s.lookups { + tags = append(tags, k) + } + s.mtx.RUnlock() + + sort.Strings(tags) + return tags +} + +func (s *TagCache) GetValues(tagName string) []string { + var vals []string + + s.mtx.RLock() + if e := s.lookups[tagName]; e != nil { + vals = make([]string, 0, len(e.values)) + for v := range e.values { + vals = append(vals, v) + } + } + s.mtx.RUnlock() + + sort.Strings(vals) + return vals +} + +func (s *TagCache) SetData(ts time.Time, data *tempofb.SearchEntry) { + s.mtx.Lock() + defer s.mtx.Unlock() + + tsUnix := ts.Unix() + kv := &tempofb.KeyValues{} + + l := data.TagsLength() + for j := 0; j < l; j++ { + data.Tags(kv, j) + key := string(kv.Key()) + l2 := kv.ValueLength() + for k := 0; k < l2; k++ { + s.setEntry(tsUnix, key, string(kv.Value(k))) + } + } +} + +// setEntry should be called under lock. 
+func (s *TagCache) setEntry(ts int64, k, v string) { + e := s.lookups[k] + if e == nil { + // First entry + s.lookups[k] = &CacheEntry{values: map[string]int64{v: ts}} + return + } + + // Prune oldest as needed + for len(e.values) >= maxValuesPerTag { + earliestv := "" + earliestts := int64(math.MaxInt64) + + for v, ts := range e.values { + if ts < earliestts { + earliestv = v + earliestts = ts + } + } + + delete(e.values, earliestv) + } + + e.values[v] = ts +} + +func (s *TagCache) PurgeExpired(before time.Time) { + s.mtx.Lock() + defer s.mtx.Unlock() + + beforeUnix := before.Unix() + + for k, e := range s.lookups { + for v, ts := range e.values { + if ts < beforeUnix { + delete(e.values, v) + } + } + + // Remove tags when all values deleted + if len(e.values) <= 0 { + delete(s.lookups, k) + } + } +} diff --git a/tempodb/search/tag_cache_test.go b/tempodb/search/tag_cache_test.go new file mode 100644 index 00000000000..913a5e6cdca --- /dev/null +++ b/tempodb/search/tag_cache_test.go @@ -0,0 +1,55 @@ +package search + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestSearchTagCacheGetNames(t *testing.T) { + c := NewTagCache() + c.setEntry(0, "k1", "v1") + c.setEntry(0, "k1", "v2") + require.Equal(t, []string{"k1"}, c.GetNames()) +} + +func TestSearchTagCacheMaxValuesPerTag(t *testing.T) { + c := NewTagCache() + + for i := 0; i < maxValuesPerTag+1; i++ { + c.setEntry(int64(i), "k", fmt.Sprintf("v%02d", i)) + } + + vals := c.GetValues("k") + + require.Len(t, vals, maxValuesPerTag) + require.Equal(t, "v01", vals[0]) // oldest v0 was evicted + require.Equal(t, fmt.Sprintf("v%02d", maxValuesPerTag), vals[maxValuesPerTag-1]) +} + +func TestSearchTagCachePurge(t *testing.T) { + c := NewTagCache() + + oneMinuteAgo := time.Now().Add(-1 * time.Minute) + twoMinutesAgo := time.Now().Add(-2 * time.Minute) + + c.setEntry(twoMinutesAgo.Unix(), "j", "a") + c.setEntry(twoMinutesAgo.Unix(), "k", "a") + c.setEntry(oneMinuteAgo.Unix(), "k", "b") + + c.PurgeExpired(oneMinuteAgo) + + require.Equal(t, []string{"k"}, c.GetNames()) // Empty tags purged + require.Equal(t, []string{"b"}, c.GetValues("k")) // Old values purged +} + +func BenchmarkSearchTagCacheSetEntry(b *testing.B) { + c := NewTagCache() + + for i := 0; i < b.N; i++ { + c.setEntry(int64(i), "k", strconv.Itoa(b.N)) + } +} diff --git a/tempodb/search/util.go b/tempodb/search/util.go new file mode 100644 index 00000000000..680a9f22376 --- /dev/null +++ b/tempodb/search/util.go @@ -0,0 +1,52 @@ +package search + +import ( + "github.com/grafana/tempo/pkg/tempofb" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/util" +) + +const ( + RootServiceNameTag = "root.service.name" + ServiceNameTag = "service.name" + RootSpanPrefix = "root." + RootSpanNameTag = "root.name" + SpanNameTag = "name" +) + +func GetSearchResultFromData(s *tempofb.SearchEntry) *tempopb.TraceSearchMetadata { + return &tempopb.TraceSearchMetadata{ + TraceID: util.TraceIDToHexString(s.Id()), + RootServiceName: s.Get(RootServiceNameTag), + RootTraceName: s.Get(RootSpanNameTag), + StartTimeUnixNano: s.StartTimeUnixNano(), + DurationMs: uint32((s.EndTimeUnixNano() - s.StartTimeUnixNano()) / 1_000_000), + } +} + +// CombineResults overlays the incoming search result with the existing result. This is required +// for the following reason: a trace may be present in multiple blocks, or in partial segments +// in live traces. The results should reflect elements of all segments. 
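+// An illustrative sketch (not part of this change) of the merge semantics:
+//
+//   existing := &tempopb.TraceSearchMetadata{TraceID: "abc", StartTimeUnixNano: 200, DurationMs: 5}
+//   incoming := &tempopb.TraceSearchMetadata{RootServiceName: "api", StartTimeUnixNano: 100, DurationMs: 9}
+//   CombineSearchResults(existing, incoming)
+//   // existing now has TraceID "abc", RootServiceName "api",
+//   // the earliest start (100) and the longest duration (9ms).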
+func CombineSearchResults(existing *tempopb.TraceSearchMetadata, incoming *tempopb.TraceSearchMetadata) { + if existing.TraceID == "" { + existing.TraceID = incoming.TraceID + } + + if existing.RootServiceName == "" { + existing.RootServiceName = incoming.RootServiceName + } + + if existing.RootTraceName == "" { + existing.RootTraceName = incoming.RootTraceName + } + + // Earliest start time. + if existing.StartTimeUnixNano > incoming.StartTimeUnixNano { + existing.StartTimeUnixNano = incoming.StartTimeUnixNano + } + + // Longest duration + if existing.DurationMs < incoming.DurationMs { + existing.DurationMs = incoming.DurationMs + } +} diff --git a/tempodb/wal/wal.go b/tempodb/wal/wal.go index 3595df28c55..9b3c37d9658 100644 --- a/tempodb/wal/wal.go +++ b/tempodb/wal/wal.go @@ -129,6 +129,20 @@ func (w *WAL) NewBlock(id uuid.UUID, tenantID string, dataEncoding string) (*App return newAppendBlock(id, tenantID, w.c.Filepath, w.c.Encoding, dataEncoding) } +func (w *WAL) NewFile(blockid uuid.UUID, tenantid string, dir string, name string) (*os.File, error) { + p := filepath.Join(w.c.Filepath, dir) + err := os.MkdirAll(p, os.ModePerm) + if err != nil { + return nil, err + } + return os.OpenFile(filepath.Join(p, fmt.Sprintf("%v:%v:%v", blockid, tenantid, name)), os.O_CREATE|os.O_RDWR, 0644) +} + +func (w *WAL) ClearFolder(dir string) error { + p := filepath.Join(w.c.Filepath, dir) + return os.RemoveAll(p) +} + func (w *WAL) LocalBackend() *local.Backend { return w.l } diff --git a/vendor/github.com/google/flatbuffers/LICENSE.txt b/vendor/github.com/google/flatbuffers/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/flatbuffers/go/BUILD.bazel b/vendor/github.com/google/flatbuffers/go/BUILD.bazel new file mode 100644 index 00000000000..78bd8d81ada --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +alias( + name = "go_default_library", + actual = ":go", + visibility = ["//visibility:public"], +) + +go_library( + name = "go", + srcs = [ + "builder.go", + "doc.go", + "encode.go", + "grpc.go", + "lib.go", + "sizes.go", + "struct.go", + "table.go", + ], + importpath = "github.com/google/flatbuffers/go", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/flatbuffers/go/builder.go b/vendor/github.com/google/flatbuffers/go/builder.go new file mode 100644 index 00000000000..d99b590bb2a --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/builder.go @@ -0,0 +1,835 @@ +package flatbuffers + +// Builder is a state machine for creating FlatBuffer objects. 
+// Use a Builder to construct object(s) starting from leaf nodes. +// +// A Builder constructs byte buffers in a last-first manner for simplicity and +// performance. +type Builder struct { + // `Bytes` gives raw access to the buffer. Most users will want to use + // FinishedBytes() instead. + Bytes []byte + + minalign int + vtable []UOffsetT + objectEnd UOffsetT + vtables []UOffsetT + head UOffsetT + nested bool + finished bool + + sharedStrings map[string]UOffsetT +} + +const fileIdentifierLength = 4 +const sizePrefixLength = 4 + +// NewBuilder initializes a Builder of size `initial_size`. +// The internal buffer is grown as needed. +func NewBuilder(initialSize int) *Builder { + if initialSize <= 0 { + initialSize = 0 + } + + b := &Builder{} + b.Bytes = make([]byte, initialSize) + b.head = UOffsetT(initialSize) + b.minalign = 1 + b.vtables = make([]UOffsetT, 0, 16) // sensible default capacity + return b +} + +// Reset truncates the underlying Builder buffer, facilitating alloc-free +// reuse of a Builder. It also resets bookkeeping data. +func (b *Builder) Reset() { + if b.Bytes != nil { + b.Bytes = b.Bytes[:cap(b.Bytes)] + } + + if b.vtables != nil { + b.vtables = b.vtables[:0] + } + + if b.vtable != nil { + b.vtable = b.vtable[:0] + } + + if b.sharedStrings != nil { + for key := range b.sharedStrings { + delete(b.sharedStrings, key) + } + } + + b.head = UOffsetT(len(b.Bytes)) + b.minalign = 1 + b.nested = false + b.finished = false +} + +// FinishedBytes returns a pointer to the written data in the byte buffer. +// Panics if the builder is not in a finished state (which is caused by calling +// `Finish()`). +func (b *Builder) FinishedBytes() []byte { + b.assertFinished() + return b.Bytes[b.Head():] +} + +// StartObject initializes bookkeeping for writing a new object. +func (b *Builder) StartObject(numfields int) { + b.assertNotNested() + b.nested = true + + // use 32-bit offsets so that arithmetic doesn't overflow. + if cap(b.vtable) < numfields || b.vtable == nil { + b.vtable = make([]UOffsetT, numfields) + } else { + b.vtable = b.vtable[:numfields] + for i := 0; i < len(b.vtable); i++ { + b.vtable[i] = 0 + } + } + + b.objectEnd = b.Offset() +} + +// WriteVtable serializes the vtable for the current object, if applicable. +// +// Before writing out the vtable, this checks pre-existing vtables for equality +// to this one. If an equal vtable is found, point the object to the existing +// vtable and return. +// +// Because vtable values are sensitive to alignment of object data, not all +// logically-equal vtables will be deduplicated. +// +// A vtable has the following format: +// +// +// * N, where N is the number of fields in +// the schema for this type. Includes deprecated fields. +// Thus, a vtable is made of 2 + N elements, each SizeVOffsetT bytes wide. +// +// An object has the following format: +// +// + +func (b *Builder) WriteVtable() (n UOffsetT) { + // Prepend a zero scalar to the object. Later in this function we'll + // write an offset here that points to the object's vtable: + b.PrependSOffsetT(0) + + objectOffset := b.Offset() + existingVtable := UOffsetT(0) + + // Trim vtable of trailing zeroes. + i := len(b.vtable) - 1 + for ; i >= 0 && b.vtable[i] == 0; i-- { + } + b.vtable = b.vtable[:i+1] + + // Search backwards through existing vtables, because similar vtables + // are likely to have been recently appended. 
See + // BenchmarkVtableDeduplication for a case in which this heuristic + // saves about 30% of the time used in writing objects with duplicate + // tables. + for i := len(b.vtables) - 1; i >= 0; i-- { + // Find the other vtable, which is associated with `i`: + vt2Offset := b.vtables[i] + vt2Start := len(b.Bytes) - int(vt2Offset) + vt2Len := GetVOffsetT(b.Bytes[vt2Start:]) + + metadata := VtableMetadataFields * SizeVOffsetT + vt2End := vt2Start + int(vt2Len) + vt2 := b.Bytes[vt2Start+metadata : vt2End] + + // Compare the other vtable to the one under consideration. + // If they are equal, store the offset and break: + if vtableEqual(b.vtable, objectOffset, vt2) { + existingVtable = vt2Offset + break + } + } + + if existingVtable == 0 { + // Did not find a vtable, so write this one to the buffer. + + // Write out the current vtable in reverse , because + // serialization occurs in last-first order: + for i := len(b.vtable) - 1; i >= 0; i-- { + var off UOffsetT + if b.vtable[i] != 0 { + // Forward reference to field; + // use 32bit number to assert no overflow: + off = objectOffset - b.vtable[i] + } + + b.PrependVOffsetT(VOffsetT(off)) + } + + // The two metadata fields are written last. + + // First, store the object bytesize: + objectSize := objectOffset - b.objectEnd + b.PrependVOffsetT(VOffsetT(objectSize)) + + // Second, store the vtable bytesize: + vBytes := (len(b.vtable) + VtableMetadataFields) * SizeVOffsetT + b.PrependVOffsetT(VOffsetT(vBytes)) + + // Next, write the offset to the new vtable in the + // already-allocated SOffsetT at the beginning of this object: + objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset) + WriteSOffsetT(b.Bytes[objectStart:], + SOffsetT(b.Offset())-SOffsetT(objectOffset)) + + // Finally, store this vtable in memory for future + // deduplication: + b.vtables = append(b.vtables, b.Offset()) + } else { + // Found a duplicate vtable. + + objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset) + b.head = UOffsetT(objectStart) + + // Write the offset to the found vtable in the + // already-allocated SOffsetT at the beginning of this object: + WriteSOffsetT(b.Bytes[b.head:], + SOffsetT(existingVtable)-SOffsetT(objectOffset)) + } + + b.vtable = b.vtable[:0] + return objectOffset +} + +// EndObject writes data necessary to finish object construction. +func (b *Builder) EndObject() UOffsetT { + b.assertNested() + n := b.WriteVtable() + b.nested = false + return n +} + +// Doubles the size of the byteslice, and copies the old data towards the +// end of the new byteslice (since we build the buffer backwards). +func (b *Builder) growByteBuffer() { + if (int64(len(b.Bytes)) & int64(0xC0000000)) != 0 { + panic("cannot grow buffer beyond 2 gigabytes") + } + newLen := len(b.Bytes) * 2 + if newLen == 0 { + newLen = 1 + } + + if cap(b.Bytes) >= newLen { + b.Bytes = b.Bytes[:newLen] + } else { + extension := make([]byte, newLen-len(b.Bytes)) + b.Bytes = append(b.Bytes, extension...) + } + + middle := newLen / 2 + copy(b.Bytes[middle:], b.Bytes[:middle]) +} + +// Head gives the start of useful data in the underlying byte buffer. +// Note: unlike other functions, this value is interpreted as from the left. +func (b *Builder) Head() UOffsetT { + return b.head +} + +// Offset relative to the end of the buffer. +func (b *Builder) Offset() UOffsetT { + return UOffsetT(len(b.Bytes)) - b.head +} + +// Pad places zeros at the current offset. 
+func (b *Builder) Pad(n int) { + for i := 0; i < n; i++ { + b.PlaceByte(0) + } +} + +// Prep prepares to write an element of `size` after `additional_bytes` +// have been written, e.g. if you write a string, you need to align such +// the int length field is aligned to SizeInt32, and the string data follows it +// directly. +// If all you need to do is align, `additionalBytes` will be 0. +func (b *Builder) Prep(size, additionalBytes int) { + // Track the biggest thing we've ever aligned to. + if size > b.minalign { + b.minalign = size + } + // Find the amount of alignment needed such that `size` is properly + // aligned after `additionalBytes`: + alignSize := (^(len(b.Bytes) - int(b.Head()) + additionalBytes)) + 1 + alignSize &= (size - 1) + + // Reallocate the buffer if needed: + for int(b.head) <= alignSize+size+additionalBytes { + oldBufSize := len(b.Bytes) + b.growByteBuffer() + b.head += UOffsetT(len(b.Bytes) - oldBufSize) + } + b.Pad(alignSize) +} + +// PrependSOffsetT prepends an SOffsetT, relative to where it will be written. +func (b *Builder) PrependSOffsetT(off SOffsetT) { + b.Prep(SizeSOffsetT, 0) // Ensure alignment is already done. + if !(UOffsetT(off) <= b.Offset()) { + panic("unreachable: off <= b.Offset()") + } + off2 := SOffsetT(b.Offset()) - off + SOffsetT(SizeSOffsetT) + b.PlaceSOffsetT(off2) +} + +// PrependUOffsetT prepends an UOffsetT, relative to where it will be written. +func (b *Builder) PrependUOffsetT(off UOffsetT) { + b.Prep(SizeUOffsetT, 0) // Ensure alignment is already done. + if !(off <= b.Offset()) { + panic("unreachable: off <= b.Offset()") + } + off2 := b.Offset() - off + UOffsetT(SizeUOffsetT) + b.PlaceUOffsetT(off2) +} + +// StartVector initializes bookkeeping for writing a new vector. +// +// A vector has the following format: +// +// +, where T is the type of elements of this vector. +func (b *Builder) StartVector(elemSize, numElems, alignment int) UOffsetT { + b.assertNotNested() + b.nested = true + b.Prep(SizeUint32, elemSize*numElems) + b.Prep(alignment, elemSize*numElems) // Just in case alignment > int. + return b.Offset() +} + +// EndVector writes data necessary to finish vector construction. +func (b *Builder) EndVector(vectorNumElems int) UOffsetT { + b.assertNested() + + // we already made space for this, so write without PrependUint32 + b.PlaceUOffsetT(UOffsetT(vectorNumElems)) + + b.nested = false + return b.Offset() +} + +// CreateSharedString Checks if the string is already written +// to the buffer before calling CreateString +func (b *Builder) CreateSharedString(s string) UOffsetT { + if b.sharedStrings == nil { + b.sharedStrings = make(map[string]UOffsetT) + } + if v, ok := b.sharedStrings[s]; ok { + return v + } + off := b.CreateString(s) + b.sharedStrings[s] = off + return off +} + +// CreateString writes a null-terminated string as a vector. +func (b *Builder) CreateString(s string) UOffsetT { + b.assertNotNested() + b.nested = true + + b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte) + b.PlaceByte(0) + + l := UOffsetT(len(s)) + + b.head -= l + copy(b.Bytes[b.head:b.head+l], s) + + return b.EndVector(len(s)) +} + +// CreateByteString writes a byte slice as a string (null-terminated). 
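+// An illustrative sketch (not part of this change): because the buffer is
+// built back to front, create strings before starting the table that
+// references them, e.g.:
+//
+//   b := NewBuilder(64)
+//   name := b.CreateByteString([]byte("span-name"))
+//   // ... StartObject(n) / PrependUOffsetTSlot(slot, name, 0) / EndObject ...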
+func (b *Builder) CreateByteString(s []byte) UOffsetT { + b.assertNotNested() + b.nested = true + + b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte) + b.PlaceByte(0) + + l := UOffsetT(len(s)) + + b.head -= l + copy(b.Bytes[b.head:b.head+l], s) + + return b.EndVector(len(s)) +} + +// CreateByteVector writes a ubyte vector +func (b *Builder) CreateByteVector(v []byte) UOffsetT { + b.assertNotNested() + b.nested = true + + b.Prep(int(SizeUOffsetT), len(v)*SizeByte) + + l := UOffsetT(len(v)) + + b.head -= l + copy(b.Bytes[b.head:b.head+l], v) + + return b.EndVector(len(v)) +} + +func (b *Builder) assertNested() { + // If you get this assert, you're in an object while trying to write + // data that belongs outside of an object. + // To fix this, write non-inline data (like vectors) before creating + // objects. + if !b.nested { + panic("Incorrect creation order: must be inside object.") + } +} + +func (b *Builder) assertNotNested() { + // If you hit this, you're trying to construct a Table/Vector/String + // during the construction of its parent table (between the MyTableBuilder + // and builder.Finish()). + // Move the creation of these sub-objects to above the MyTableBuilder to + // not get this assert. + // Ignoring this assert may appear to work in simple cases, but the reason + // it is here is that storing objects in-line may cause vtable offsets + // to not fit anymore. It also leads to vtable duplication. + if b.nested { + panic("Incorrect creation order: object must not be nested.") + } +} + +func (b *Builder) assertFinished() { + // If you get this assert, you're attempting to get access a buffer + // which hasn't been finished yet. Be sure to call builder.Finish() + // with your root table. + // If you really need to access an unfinished buffer, use the Bytes + // buffer directly. + if !b.finished { + panic("Incorrect use of FinishedBytes(): must call 'Finish' first.") + } +} + +// PrependBoolSlot prepends a bool onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependBoolSlot(o int, x, d bool) { + val := byte(0) + if x { + val = 1 + } + def := byte(0) + if d { + def = 1 + } + b.PrependByteSlot(o, val, def) +} + +// PrependByteSlot prepends a byte onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependByteSlot(o int, x, d byte) { + if x != d { + b.PrependByte(x) + b.Slot(o) + } +} + +// PrependUint8Slot prepends a uint8 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint8Slot(o int, x, d uint8) { + if x != d { + b.PrependUint8(x) + b.Slot(o) + } +} + +// PrependUint16Slot prepends a uint16 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint16Slot(o int, x, d uint16) { + if x != d { + b.PrependUint16(x) + b.Slot(o) + } +} + +// PrependUint32Slot prepends a uint32 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint32Slot(o int, x, d uint32) { + if x != d { + b.PrependUint32(x) + b.Slot(o) + } +} + +// PrependUint64Slot prepends a uint64 onto the object at vtable slot `o`. 
+// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUint64Slot(o int, x, d uint64) { + if x != d { + b.PrependUint64(x) + b.Slot(o) + } +} + +// PrependInt8Slot prepends a int8 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt8Slot(o int, x, d int8) { + if x != d { + b.PrependInt8(x) + b.Slot(o) + } +} + +// PrependInt16Slot prepends a int16 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt16Slot(o int, x, d int16) { + if x != d { + b.PrependInt16(x) + b.Slot(o) + } +} + +// PrependInt32Slot prepends a int32 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt32Slot(o int, x, d int32) { + if x != d { + b.PrependInt32(x) + b.Slot(o) + } +} + +// PrependInt64Slot prepends a int64 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependInt64Slot(o int, x, d int64) { + if x != d { + b.PrependInt64(x) + b.Slot(o) + } +} + +// PrependFloat32Slot prepends a float32 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependFloat32Slot(o int, x, d float32) { + if x != d { + b.PrependFloat32(x) + b.Slot(o) + } +} + +// PrependFloat64Slot prepends a float64 onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependFloat64Slot(o int, x, d float64) { + if x != d { + b.PrependFloat64(x) + b.Slot(o) + } +} + +// PrependUOffsetTSlot prepends an UOffsetT onto the object at vtable slot `o`. +// If value `x` equals default `d`, then the slot will be set to zero and no +// other data will be written. +func (b *Builder) PrependUOffsetTSlot(o int, x, d UOffsetT) { + if x != d { + b.PrependUOffsetT(x) + b.Slot(o) + } +} + +// PrependStructSlot prepends a struct onto the object at vtable slot `o`. +// Structs are stored inline, so nothing additional is being added. +// In generated code, `d` is always 0. +func (b *Builder) PrependStructSlot(voffset int, x, d UOffsetT) { + if x != d { + b.assertNested() + if x != b.Offset() { + panic("inline data write outside of object") + } + b.Slot(voffset) + } +} + +// Slot sets the vtable key `voffset` to the current location in the buffer. +func (b *Builder) Slot(slotnum int) { + b.vtable[slotnum] = UOffsetT(b.Offset()) +} + +// FinishWithFileIdentifier finalizes a buffer, pointing to the given `rootTable`. 
+// as well as applys a file identifier +func (b *Builder) FinishWithFileIdentifier(rootTable UOffsetT, fid []byte) { + if fid == nil || len(fid) != fileIdentifierLength { + panic("incorrect file identifier length") + } + // In order to add a file identifier to the flatbuffer message, we need + // to prepare an alignment and file identifier length + b.Prep(b.minalign, SizeInt32+fileIdentifierLength) + for i := fileIdentifierLength - 1; i >= 0; i-- { + // place the file identifier + b.PlaceByte(fid[i]) + } + // finish + b.Finish(rootTable) +} + +// FinishSizePrefixed finalizes a buffer, pointing to the given `rootTable`. +// The buffer is prefixed with the size of the buffer, excluding the size +// of the prefix itself. +func (b *Builder) FinishSizePrefixed(rootTable UOffsetT) { + b.finish(rootTable, true) +} + +// FinishSizePrefixedWithFileIdentifier finalizes a buffer, pointing to the given `rootTable` +// and applies a file identifier. The buffer is prefixed with the size of the buffer, +// excluding the size of the prefix itself. +func (b *Builder) FinishSizePrefixedWithFileIdentifier(rootTable UOffsetT, fid []byte) { + if fid == nil || len(fid) != fileIdentifierLength { + panic("incorrect file identifier length") + } + // In order to add a file identifier and size prefix to the flatbuffer message, + // we need to prepare an alignment, a size prefix length, and file identifier length + b.Prep(b.minalign, SizeInt32+fileIdentifierLength+sizePrefixLength) + for i := fileIdentifierLength - 1; i >= 0; i-- { + // place the file identifier + b.PlaceByte(fid[i]) + } + // finish + b.finish(rootTable, true) +} + +// Finish finalizes a buffer, pointing to the given `rootTable`. +func (b *Builder) Finish(rootTable UOffsetT) { + b.finish(rootTable, false) +} + +// finish finalizes a buffer, pointing to the given `rootTable` +// with an optional size prefix. +func (b *Builder) finish(rootTable UOffsetT, sizePrefix bool) { + b.assertNotNested() + + if sizePrefix { + b.Prep(b.minalign, SizeUOffsetT+sizePrefixLength) + } else { + b.Prep(b.minalign, SizeUOffsetT) + } + + b.PrependUOffsetT(rootTable) + + if sizePrefix { + b.PlaceUint32(uint32(b.Offset())) + } + + b.finished = true +} + +// vtableEqual compares an unwritten vtable to a written vtable. +func vtableEqual(a []UOffsetT, objectStart UOffsetT, b []byte) bool { + if len(a)*SizeVOffsetT != len(b) { + return false + } + + for i := 0; i < len(a); i++ { + x := GetVOffsetT(b[i*SizeVOffsetT : (i+1)*SizeVOffsetT]) + + // Skip vtable entries that indicate a default value. + if x == 0 && a[i] == 0 { + continue + } + + y := SOffsetT(objectStart) - SOffsetT(a[i]) + if SOffsetT(x) != y { + return false + } + } + return true +} + +// PrependBool prepends a bool to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependBool(x bool) { + b.Prep(SizeBool, 0) + b.PlaceBool(x) +} + +// PrependUint8 prepends a uint8 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependUint8(x uint8) { + b.Prep(SizeUint8, 0) + b.PlaceUint8(x) +} + +// PrependUint16 prepends a uint16 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependUint16(x uint16) { + b.Prep(SizeUint16, 0) + b.PlaceUint16(x) +} + +// PrependUint32 prepends a uint32 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependUint32(x uint32) { + b.Prep(SizeUint32, 0) + b.PlaceUint32(x) +} + +// PrependUint64 prepends a uint64 to the Builder buffer. +// Aligns and checks for space. 
+func (b *Builder) PrependUint64(x uint64) { + b.Prep(SizeUint64, 0) + b.PlaceUint64(x) +} + +// PrependInt8 prepends a int8 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependInt8(x int8) { + b.Prep(SizeInt8, 0) + b.PlaceInt8(x) +} + +// PrependInt16 prepends a int16 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependInt16(x int16) { + b.Prep(SizeInt16, 0) + b.PlaceInt16(x) +} + +// PrependInt32 prepends a int32 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependInt32(x int32) { + b.Prep(SizeInt32, 0) + b.PlaceInt32(x) +} + +// PrependInt64 prepends a int64 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependInt64(x int64) { + b.Prep(SizeInt64, 0) + b.PlaceInt64(x) +} + +// PrependFloat32 prepends a float32 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependFloat32(x float32) { + b.Prep(SizeFloat32, 0) + b.PlaceFloat32(x) +} + +// PrependFloat64 prepends a float64 to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependFloat64(x float64) { + b.Prep(SizeFloat64, 0) + b.PlaceFloat64(x) +} + +// PrependByte prepends a byte to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependByte(x byte) { + b.Prep(SizeByte, 0) + b.PlaceByte(x) +} + +// PrependVOffsetT prepends a VOffsetT to the Builder buffer. +// Aligns and checks for space. +func (b *Builder) PrependVOffsetT(x VOffsetT) { + b.Prep(SizeVOffsetT, 0) + b.PlaceVOffsetT(x) +} + +// PlaceBool prepends a bool to the Builder, without checking for space. +func (b *Builder) PlaceBool(x bool) { + b.head -= UOffsetT(SizeBool) + WriteBool(b.Bytes[b.head:], x) +} + +// PlaceUint8 prepends a uint8 to the Builder, without checking for space. +func (b *Builder) PlaceUint8(x uint8) { + b.head -= UOffsetT(SizeUint8) + WriteUint8(b.Bytes[b.head:], x) +} + +// PlaceUint16 prepends a uint16 to the Builder, without checking for space. +func (b *Builder) PlaceUint16(x uint16) { + b.head -= UOffsetT(SizeUint16) + WriteUint16(b.Bytes[b.head:], x) +} + +// PlaceUint32 prepends a uint32 to the Builder, without checking for space. +func (b *Builder) PlaceUint32(x uint32) { + b.head -= UOffsetT(SizeUint32) + WriteUint32(b.Bytes[b.head:], x) +} + +// PlaceUint64 prepends a uint64 to the Builder, without checking for space. +func (b *Builder) PlaceUint64(x uint64) { + b.head -= UOffsetT(SizeUint64) + WriteUint64(b.Bytes[b.head:], x) +} + +// PlaceInt8 prepends a int8 to the Builder, without checking for space. +func (b *Builder) PlaceInt8(x int8) { + b.head -= UOffsetT(SizeInt8) + WriteInt8(b.Bytes[b.head:], x) +} + +// PlaceInt16 prepends a int16 to the Builder, without checking for space. +func (b *Builder) PlaceInt16(x int16) { + b.head -= UOffsetT(SizeInt16) + WriteInt16(b.Bytes[b.head:], x) +} + +// PlaceInt32 prepends a int32 to the Builder, without checking for space. +func (b *Builder) PlaceInt32(x int32) { + b.head -= UOffsetT(SizeInt32) + WriteInt32(b.Bytes[b.head:], x) +} + +// PlaceInt64 prepends a int64 to the Builder, without checking for space. +func (b *Builder) PlaceInt64(x int64) { + b.head -= UOffsetT(SizeInt64) + WriteInt64(b.Bytes[b.head:], x) +} + +// PlaceFloat32 prepends a float32 to the Builder, without checking for space. +func (b *Builder) PlaceFloat32(x float32) { + b.head -= UOffsetT(SizeFloat32) + WriteFloat32(b.Bytes[b.head:], x) +} + +// PlaceFloat64 prepends a float64 to the Builder, without checking for space. 
+func (b *Builder) PlaceFloat64(x float64) { + b.head -= UOffsetT(SizeFloat64) + WriteFloat64(b.Bytes[b.head:], x) +} + +// PlaceByte prepends a byte to the Builder, without checking for space. +func (b *Builder) PlaceByte(x byte) { + b.head -= UOffsetT(SizeByte) + WriteByte(b.Bytes[b.head:], x) +} + +// PlaceVOffsetT prepends a VOffsetT to the Builder, without checking for space. +func (b *Builder) PlaceVOffsetT(x VOffsetT) { + b.head -= UOffsetT(SizeVOffsetT) + WriteVOffsetT(b.Bytes[b.head:], x) +} + +// PlaceSOffsetT prepends a SOffsetT to the Builder, without checking for space. +func (b *Builder) PlaceSOffsetT(x SOffsetT) { + b.head -= UOffsetT(SizeSOffsetT) + WriteSOffsetT(b.Bytes[b.head:], x) +} + +// PlaceUOffsetT prepends a UOffsetT to the Builder, without checking for space. +func (b *Builder) PlaceUOffsetT(x UOffsetT) { + b.head -= UOffsetT(SizeUOffsetT) + WriteUOffsetT(b.Bytes[b.head:], x) +} diff --git a/vendor/github.com/google/flatbuffers/go/doc.go b/vendor/github.com/google/flatbuffers/go/doc.go new file mode 100644 index 00000000000..694edc763d8 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/doc.go @@ -0,0 +1,3 @@ +// Package flatbuffers provides facilities to read and write flatbuffers +// objects. +package flatbuffers diff --git a/vendor/github.com/google/flatbuffers/go/encode.go b/vendor/github.com/google/flatbuffers/go/encode.go new file mode 100644 index 00000000000..a2a57981255 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/encode.go @@ -0,0 +1,238 @@ +package flatbuffers + +import ( + "math" +) + +type ( + // A SOffsetT stores a signed offset into arbitrary data. + SOffsetT int32 + // A UOffsetT stores an unsigned offset into vector data. + UOffsetT uint32 + // A VOffsetT stores an unsigned offset in a vtable. + VOffsetT uint16 +) + +const ( + // VtableMetadataFields is the count of metadata fields in each vtable. + VtableMetadataFields = 2 +) + +// GetByte decodes a little-endian byte from a byte slice. +func GetByte(buf []byte) byte { + return byte(GetUint8(buf)) +} + +// GetBool decodes a little-endian bool from a byte slice. +func GetBool(buf []byte) bool { + return buf[0] == 1 +} + +// GetUint8 decodes a little-endian uint8 from a byte slice. +func GetUint8(buf []byte) (n uint8) { + n = uint8(buf[0]) + return +} + +// GetUint16 decodes a little-endian uint16 from a byte slice. +func GetUint16(buf []byte) (n uint16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + n |= uint16(buf[0]) + n |= uint16(buf[1]) << 8 + return +} + +// GetUint32 decodes a little-endian uint32 from a byte slice. +func GetUint32(buf []byte) (n uint32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + n |= uint32(buf[0]) + n |= uint32(buf[1]) << 8 + n |= uint32(buf[2]) << 16 + n |= uint32(buf[3]) << 24 + return +} + +// GetUint64 decodes a little-endian uint64 from a byte slice. +func GetUint64(buf []byte) (n uint64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + n |= uint64(buf[0]) + n |= uint64(buf[1]) << 8 + n |= uint64(buf[2]) << 16 + n |= uint64(buf[3]) << 24 + n |= uint64(buf[4]) << 32 + n |= uint64(buf[5]) << 40 + n |= uint64(buf[6]) << 48 + n |= uint64(buf[7]) << 56 + return +} + +// GetInt8 decodes a little-endian int8 from a byte slice. +func GetInt8(buf []byte) (n int8) { + n = int8(buf[0]) + return +} + +// GetInt16 decodes a little-endian int16 from a byte slice. +func GetInt16(buf []byte) (n int16) { + _ = buf[1] // Force one bounds check. 
See: golang.org/issue/14808 + n |= int16(buf[0]) + n |= int16(buf[1]) << 8 + return +} + +// GetInt32 decodes a little-endian int32 from a byte slice. +func GetInt32(buf []byte) (n int32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + n |= int32(buf[0]) + n |= int32(buf[1]) << 8 + n |= int32(buf[2]) << 16 + n |= int32(buf[3]) << 24 + return +} + +// GetInt64 decodes a little-endian int64 from a byte slice. +func GetInt64(buf []byte) (n int64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + n |= int64(buf[0]) + n |= int64(buf[1]) << 8 + n |= int64(buf[2]) << 16 + n |= int64(buf[3]) << 24 + n |= int64(buf[4]) << 32 + n |= int64(buf[5]) << 40 + n |= int64(buf[6]) << 48 + n |= int64(buf[7]) << 56 + return +} + +// GetFloat32 decodes a little-endian float32 from a byte slice. +func GetFloat32(buf []byte) float32 { + x := GetUint32(buf) + return math.Float32frombits(x) +} + +// GetFloat64 decodes a little-endian float64 from a byte slice. +func GetFloat64(buf []byte) float64 { + x := GetUint64(buf) + return math.Float64frombits(x) +} + +// GetUOffsetT decodes a little-endian UOffsetT from a byte slice. +func GetUOffsetT(buf []byte) UOffsetT { + return UOffsetT(GetUint32(buf)) +} + +// GetSOffsetT decodes a little-endian SOffsetT from a byte slice. +func GetSOffsetT(buf []byte) SOffsetT { + return SOffsetT(GetInt32(buf)) +} + +// GetVOffsetT decodes a little-endian VOffsetT from a byte slice. +func GetVOffsetT(buf []byte) VOffsetT { + return VOffsetT(GetUint16(buf)) +} + +// WriteByte encodes a little-endian uint8 into a byte slice. +func WriteByte(buf []byte, n byte) { + WriteUint8(buf, uint8(n)) +} + +// WriteBool encodes a little-endian bool into a byte slice. +func WriteBool(buf []byte, b bool) { + buf[0] = 0 + if b { + buf[0] = 1 + } +} + +// WriteUint8 encodes a little-endian uint8 into a byte slice. +func WriteUint8(buf []byte, n uint8) { + buf[0] = byte(n) +} + +// WriteUint16 encodes a little-endian uint16 into a byte slice. +func WriteUint16(buf []byte, n uint16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) +} + +// WriteUint32 encodes a little-endian uint32 into a byte slice. +func WriteUint32(buf []byte, n uint32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) +} + +// WriteUint64 encodes a little-endian uint64 into a byte slice. +func WriteUint64(buf []byte, n uint64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) + buf[4] = byte(n >> 32) + buf[5] = byte(n >> 40) + buf[6] = byte(n >> 48) + buf[7] = byte(n >> 56) +} + +// WriteInt8 encodes a little-endian int8 into a byte slice. +func WriteInt8(buf []byte, n int8) { + buf[0] = byte(n) +} + +// WriteInt16 encodes a little-endian int16 into a byte slice. +func WriteInt16(buf []byte, n int16) { + _ = buf[1] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) +} + +// WriteInt32 encodes a little-endian int32 into a byte slice. +func WriteInt32(buf []byte, n int32) { + _ = buf[3] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) +} + +// WriteInt64 encodes a little-endian int64 into a byte slice. 
+func WriteInt64(buf []byte, n int64) { + _ = buf[7] // Force one bounds check. See: golang.org/issue/14808 + buf[0] = byte(n) + buf[1] = byte(n >> 8) + buf[2] = byte(n >> 16) + buf[3] = byte(n >> 24) + buf[4] = byte(n >> 32) + buf[5] = byte(n >> 40) + buf[6] = byte(n >> 48) + buf[7] = byte(n >> 56) +} + +// WriteFloat32 encodes a little-endian float32 into a byte slice. +func WriteFloat32(buf []byte, n float32) { + WriteUint32(buf, math.Float32bits(n)) +} + +// WriteFloat64 encodes a little-endian float64 into a byte slice. +func WriteFloat64(buf []byte, n float64) { + WriteUint64(buf, math.Float64bits(n)) +} + +// WriteVOffsetT encodes a little-endian VOffsetT into a byte slice. +func WriteVOffsetT(buf []byte, n VOffsetT) { + WriteUint16(buf, uint16(n)) +} + +// WriteSOffsetT encodes a little-endian SOffsetT into a byte slice. +func WriteSOffsetT(buf []byte, n SOffsetT) { + WriteInt32(buf, int32(n)) +} + +// WriteUOffsetT encodes a little-endian UOffsetT into a byte slice. +func WriteUOffsetT(buf []byte, n UOffsetT) { + WriteUint32(buf, uint32(n)) +} diff --git a/vendor/github.com/google/flatbuffers/go/grpc.go b/vendor/github.com/google/flatbuffers/go/grpc.go new file mode 100644 index 00000000000..15f1a510d3b --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/grpc.go @@ -0,0 +1,38 @@ +package flatbuffers + +// Codec implements gRPC-go Codec which is used to encode and decode messages. +var Codec = "flatbuffers" + +// FlatbuffersCodec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type FlatbuffersCodec struct{} + +// Marshal returns the wire format of v. +func (FlatbuffersCodec) Marshal(v interface{}) ([]byte, error) { + return v.(*Builder).FinishedBytes(), nil +} + +// Unmarshal parses the wire format into v. +func (FlatbuffersCodec) Unmarshal(data []byte, v interface{}) error { + v.(flatbuffersInit).Init(data, GetUOffsetT(data)) + return nil +} + +// String old gRPC Codec interface func +func (FlatbuffersCodec) String() string { + return Codec +} + +// Name returns the name of the Codec implementation. The returned string +// will be used as part of content type in transmission. The result must be +// static; the result cannot change between calls. +// +// add Name() for ForceCodec interface +func (FlatbuffersCodec) Name() string { + return Codec +} + +type flatbuffersInit interface { + Init(data []byte, i UOffsetT) +} diff --git a/vendor/github.com/google/flatbuffers/go/lib.go b/vendor/github.com/google/flatbuffers/go/lib.go new file mode 100644 index 00000000000..9a333ff04d8 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/lib.go @@ -0,0 +1,25 @@ +package flatbuffers + +// FlatBuffer is the interface that represents a flatbuffer. +type FlatBuffer interface { + Table() Table + Init(buf []byte, i UOffsetT) +} + +// GetRootAs is a generic helper to initialize a FlatBuffer with the provided buffer bytes and its data offset. 
+func GetRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) { + n := GetUOffsetT(buf[offset:]) + fb.Init(buf, n+offset) +} + +// GetSizePrefixedRootAs is a generic helper to initialize a FlatBuffer with the provided size-prefixed buffer +// bytes and its data offset +func GetSizePrefixedRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) { + n := GetUOffsetT(buf[offset+sizePrefixLength:]) + fb.Init(buf, n+offset+sizePrefixLength) +} + +// GetSizePrefix reads the size from a size-prefixed flatbuffer +func GetSizePrefix(buf []byte, offset UOffsetT) uint32 { + return GetUint32(buf[offset:]) +} diff --git a/vendor/github.com/google/flatbuffers/go/sizes.go b/vendor/github.com/google/flatbuffers/go/sizes.go new file mode 100644 index 00000000000..ba221698455 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/sizes.go @@ -0,0 +1,55 @@ +package flatbuffers + +import ( + "unsafe" +) + +const ( + // See http://golang.org/ref/spec#Numeric_types + + // SizeUint8 is the byte size of a uint8. + SizeUint8 = 1 + // SizeUint16 is the byte size of a uint16. + SizeUint16 = 2 + // SizeUint32 is the byte size of a uint32. + SizeUint32 = 4 + // SizeUint64 is the byte size of a uint64. + SizeUint64 = 8 + + // SizeInt8 is the byte size of a int8. + SizeInt8 = 1 + // SizeInt16 is the byte size of a int16. + SizeInt16 = 2 + // SizeInt32 is the byte size of a int32. + SizeInt32 = 4 + // SizeInt64 is the byte size of a int64. + SizeInt64 = 8 + + // SizeFloat32 is the byte size of a float32. + SizeFloat32 = 4 + // SizeFloat64 is the byte size of a float64. + SizeFloat64 = 8 + + // SizeByte is the byte size of a byte. + // The `byte` type is aliased (by Go definition) to uint8. + SizeByte = 1 + + // SizeBool is the byte size of a bool. + // The `bool` type is aliased (by flatbuffers convention) to uint8. + SizeBool = 1 + + // SizeSOffsetT is the byte size of an SOffsetT. + // The `SOffsetT` type is aliased (by flatbuffers convention) to int32. + SizeSOffsetT = 4 + // SizeUOffsetT is the byte size of an UOffsetT. + // The `UOffsetT` type is aliased (by flatbuffers convention) to uint32. + SizeUOffsetT = 4 + // SizeVOffsetT is the byte size of an VOffsetT. + // The `VOffsetT` type is aliased (by flatbuffers convention) to uint16. + SizeVOffsetT = 2 +) + +// byteSliceToString converts a []byte to string without a heap allocation. +func byteSliceToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/google/flatbuffers/go/struct.go b/vendor/github.com/google/flatbuffers/go/struct.go new file mode 100644 index 00000000000..11258f715d4 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/struct.go @@ -0,0 +1,8 @@ +package flatbuffers + +// Struct wraps a byte slice and provides read access to its data. +// +// Structs do not have a vtable. +type Struct struct { + Table +} diff --git a/vendor/github.com/google/flatbuffers/go/table.go b/vendor/github.com/google/flatbuffers/go/table.go new file mode 100644 index 00000000000..b273146fad4 --- /dev/null +++ b/vendor/github.com/google/flatbuffers/go/table.go @@ -0,0 +1,505 @@ +package flatbuffers + +// Table wraps a byte slice and provides read access to its data. +// +// The variable `Pos` indicates the root of the FlatBuffers object therein. +type Table struct { + Bytes []byte + Pos UOffsetT // Always < 1<<31. +} + +// Offset provides access into the Table's vtable. +// +// Fields which are deprecated are ignored by checking against the vtable's length. 
+func (t *Table) Offset(vtableOffset VOffsetT) VOffsetT { + vtable := UOffsetT(SOffsetT(t.Pos) - t.GetSOffsetT(t.Pos)) + if vtableOffset < t.GetVOffsetT(vtable) { + return t.GetVOffsetT(vtable + UOffsetT(vtableOffset)) + } + return 0 +} + +// Indirect retrieves the relative offset stored at `offset`. +func (t *Table) Indirect(off UOffsetT) UOffsetT { + return off + GetUOffsetT(t.Bytes[off:]) +} + +// String gets a string from data stored inside the flatbuffer. +func (t *Table) String(off UOffsetT) string { + b := t.ByteVector(off) + return byteSliceToString(b) +} + +// ByteVector gets a byte slice from data stored inside the flatbuffer. +func (t *Table) ByteVector(off UOffsetT) []byte { + off += GetUOffsetT(t.Bytes[off:]) + start := off + UOffsetT(SizeUOffsetT) + length := GetUOffsetT(t.Bytes[off:]) + return t.Bytes[start : start+length] +} + +// VectorLen retrieves the length of the vector whose offset is stored at +// "off" in this object. +func (t *Table) VectorLen(off UOffsetT) int { + off += t.Pos + off += GetUOffsetT(t.Bytes[off:]) + return int(GetUOffsetT(t.Bytes[off:])) +} + +// Vector retrieves the start of data of the vector whose offset is stored +// at "off" in this object. +func (t *Table) Vector(off UOffsetT) UOffsetT { + off += t.Pos + x := off + GetUOffsetT(t.Bytes[off:]) + // data starts after metadata containing the vector length + x += UOffsetT(SizeUOffsetT) + return x +} + +// Union initializes any Table-derived type to point to the union at the given +// offset. +func (t *Table) Union(t2 *Table, off UOffsetT) { + off += t.Pos + t2.Pos = off + t.GetUOffsetT(off) + t2.Bytes = t.Bytes +} + +// GetBool retrieves a bool at the given offset. +func (t *Table) GetBool(off UOffsetT) bool { + return GetBool(t.Bytes[off:]) +} + +// GetByte retrieves a byte at the given offset. +func (t *Table) GetByte(off UOffsetT) byte { + return GetByte(t.Bytes[off:]) +} + +// GetUint8 retrieves a uint8 at the given offset. +func (t *Table) GetUint8(off UOffsetT) uint8 { + return GetUint8(t.Bytes[off:]) +} + +// GetUint16 retrieves a uint16 at the given offset. +func (t *Table) GetUint16(off UOffsetT) uint16 { + return GetUint16(t.Bytes[off:]) +} + +// GetUint32 retrieves a uint32 at the given offset. +func (t *Table) GetUint32(off UOffsetT) uint32 { + return GetUint32(t.Bytes[off:]) +} + +// GetUint64 retrieves a uint64 at the given offset. +func (t *Table) GetUint64(off UOffsetT) uint64 { + return GetUint64(t.Bytes[off:]) +} + +// GetInt8 retrieves a int8 at the given offset. +func (t *Table) GetInt8(off UOffsetT) int8 { + return GetInt8(t.Bytes[off:]) +} + +// GetInt16 retrieves a int16 at the given offset. +func (t *Table) GetInt16(off UOffsetT) int16 { + return GetInt16(t.Bytes[off:]) +} + +// GetInt32 retrieves a int32 at the given offset. +func (t *Table) GetInt32(off UOffsetT) int32 { + return GetInt32(t.Bytes[off:]) +} + +// GetInt64 retrieves a int64 at the given offset. +func (t *Table) GetInt64(off UOffsetT) int64 { + return GetInt64(t.Bytes[off:]) +} + +// GetFloat32 retrieves a float32 at the given offset. +func (t *Table) GetFloat32(off UOffsetT) float32 { + return GetFloat32(t.Bytes[off:]) +} + +// GetFloat64 retrieves a float64 at the given offset. +func (t *Table) GetFloat64(off UOffsetT) float64 { + return GetFloat64(t.Bytes[off:]) +} + +// GetUOffsetT retrieves a UOffsetT at the given offset. +func (t *Table) GetUOffsetT(off UOffsetT) UOffsetT { + return GetUOffsetT(t.Bytes[off:]) +} + +// GetVOffsetT retrieves a VOffsetT at the given offset. 
+func (t *Table) GetVOffsetT(off UOffsetT) VOffsetT { + return GetVOffsetT(t.Bytes[off:]) +} + +// GetSOffsetT retrieves a SOffsetT at the given offset. +func (t *Table) GetSOffsetT(off UOffsetT) SOffsetT { + return GetSOffsetT(t.Bytes[off:]) +} + +// GetBoolSlot retrieves the bool that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetBoolSlot(slot VOffsetT, d bool) bool { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetBool(t.Pos + UOffsetT(off)) +} + +// GetByteSlot retrieves the byte that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetByteSlot(slot VOffsetT, d byte) byte { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetByte(t.Pos + UOffsetT(off)) +} + +// GetInt8Slot retrieves the int8 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt8Slot(slot VOffsetT, d int8) int8 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt8(t.Pos + UOffsetT(off)) +} + +// GetUint8Slot retrieves the uint8 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint8Slot(slot VOffsetT, d uint8) uint8 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint8(t.Pos + UOffsetT(off)) +} + +// GetInt16Slot retrieves the int16 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt16Slot(slot VOffsetT, d int16) int16 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt16(t.Pos + UOffsetT(off)) +} + +// GetUint16Slot retrieves the uint16 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint16Slot(slot VOffsetT, d uint16) uint16 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint16(t.Pos + UOffsetT(off)) +} + +// GetInt32Slot retrieves the int32 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt32Slot(slot VOffsetT, d int32) int32 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt32(t.Pos + UOffsetT(off)) +} + +// GetUint32Slot retrieves the uint32 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetUint32Slot(slot VOffsetT, d uint32) uint32 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint32(t.Pos + UOffsetT(off)) +} + +// GetInt64Slot retrieves the int64 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetInt64Slot(slot VOffsetT, d int64) int64 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetInt64(t.Pos + UOffsetT(off)) +} + +// GetUint64Slot retrieves the uint64 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. 
+func (t *Table) GetUint64Slot(slot VOffsetT, d uint64) uint64 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetUint64(t.Pos + UOffsetT(off)) +} + +// GetFloat32Slot retrieves the float32 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetFloat32Slot(slot VOffsetT, d float32) float32 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetFloat32(t.Pos + UOffsetT(off)) +} + +// GetFloat64Slot retrieves the float64 that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetFloat64Slot(slot VOffsetT, d float64) float64 { + off := t.Offset(slot) + if off == 0 { + return d + } + + return t.GetFloat64(t.Pos + UOffsetT(off)) +} + +// GetVOffsetTSlot retrieves the VOffsetT that the given vtable location +// points to. If the vtable value is zero, the default value `d` +// will be returned. +func (t *Table) GetVOffsetTSlot(slot VOffsetT, d VOffsetT) VOffsetT { + off := t.Offset(slot) + if off == 0 { + return d + } + return VOffsetT(off) +} + +// MutateBool updates a bool at the given offset. +func (t *Table) MutateBool(off UOffsetT, n bool) bool { + WriteBool(t.Bytes[off:], n) + return true +} + +// MutateByte updates a Byte at the given offset. +func (t *Table) MutateByte(off UOffsetT, n byte) bool { + WriteByte(t.Bytes[off:], n) + return true +} + +// MutateUint8 updates a Uint8 at the given offset. +func (t *Table) MutateUint8(off UOffsetT, n uint8) bool { + WriteUint8(t.Bytes[off:], n) + return true +} + +// MutateUint16 updates a Uint16 at the given offset. +func (t *Table) MutateUint16(off UOffsetT, n uint16) bool { + WriteUint16(t.Bytes[off:], n) + return true +} + +// MutateUint32 updates a Uint32 at the given offset. +func (t *Table) MutateUint32(off UOffsetT, n uint32) bool { + WriteUint32(t.Bytes[off:], n) + return true +} + +// MutateUint64 updates a Uint64 at the given offset. +func (t *Table) MutateUint64(off UOffsetT, n uint64) bool { + WriteUint64(t.Bytes[off:], n) + return true +} + +// MutateInt8 updates a Int8 at the given offset. +func (t *Table) MutateInt8(off UOffsetT, n int8) bool { + WriteInt8(t.Bytes[off:], n) + return true +} + +// MutateInt16 updates a Int16 at the given offset. +func (t *Table) MutateInt16(off UOffsetT, n int16) bool { + WriteInt16(t.Bytes[off:], n) + return true +} + +// MutateInt32 updates a Int32 at the given offset. +func (t *Table) MutateInt32(off UOffsetT, n int32) bool { + WriteInt32(t.Bytes[off:], n) + return true +} + +// MutateInt64 updates a Int64 at the given offset. +func (t *Table) MutateInt64(off UOffsetT, n int64) bool { + WriteInt64(t.Bytes[off:], n) + return true +} + +// MutateFloat32 updates a Float32 at the given offset. +func (t *Table) MutateFloat32(off UOffsetT, n float32) bool { + WriteFloat32(t.Bytes[off:], n) + return true +} + +// MutateFloat64 updates a Float64 at the given offset. +func (t *Table) MutateFloat64(off UOffsetT, n float64) bool { + WriteFloat64(t.Bytes[off:], n) + return true +} + +// MutateUOffsetT updates a UOffsetT at the given offset. +func (t *Table) MutateUOffsetT(off UOffsetT, n UOffsetT) bool { + WriteUOffsetT(t.Bytes[off:], n) + return true +} + +// MutateVOffsetT updates a VOffsetT at the given offset. +func (t *Table) MutateVOffsetT(off UOffsetT, n VOffsetT) bool { + WriteVOffsetT(t.Bytes[off:], n) + return true +} + +// MutateSOffsetT updates a SOffsetT at the given offset. 
+func (t *Table) MutateSOffsetT(off UOffsetT, n SOffsetT) bool { + WriteSOffsetT(t.Bytes[off:], n) + return true +} + +// MutateBoolSlot updates the bool at given vtable location +func (t *Table) MutateBoolSlot(slot VOffsetT, n bool) bool { + if off := t.Offset(slot); off != 0 { + t.MutateBool(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateByteSlot updates the byte at given vtable location +func (t *Table) MutateByteSlot(slot VOffsetT, n byte) bool { + if off := t.Offset(slot); off != 0 { + t.MutateByte(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt8Slot updates the int8 at given vtable location +func (t *Table) MutateInt8Slot(slot VOffsetT, n int8) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt8(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint8Slot updates the uint8 at given vtable location +func (t *Table) MutateUint8Slot(slot VOffsetT, n uint8) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint8(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt16Slot updates the int16 at given vtable location +func (t *Table) MutateInt16Slot(slot VOffsetT, n int16) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt16(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint16Slot updates the uint16 at given vtable location +func (t *Table) MutateUint16Slot(slot VOffsetT, n uint16) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint16(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt32Slot updates the int32 at given vtable location +func (t *Table) MutateInt32Slot(slot VOffsetT, n int32) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt32(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint32Slot updates the uint32 at given vtable location +func (t *Table) MutateUint32Slot(slot VOffsetT, n uint32) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint32(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateInt64Slot updates the int64 at given vtable location +func (t *Table) MutateInt64Slot(slot VOffsetT, n int64) bool { + if off := t.Offset(slot); off != 0 { + t.MutateInt64(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateUint64Slot updates the uint64 at given vtable location +func (t *Table) MutateUint64Slot(slot VOffsetT, n uint64) bool { + if off := t.Offset(slot); off != 0 { + t.MutateUint64(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateFloat32Slot updates the float32 at given vtable location +func (t *Table) MutateFloat32Slot(slot VOffsetT, n float32) bool { + if off := t.Offset(slot); off != 0 { + t.MutateFloat32(t.Pos+UOffsetT(off), n) + return true + } + + return false +} + +// MutateFloat64Slot updates the float64 at given vtable location +func (t *Table) MutateFloat64Slot(slot VOffsetT, n float64) bool { + if off := t.Offset(slot); off != 0 { + t.MutateFloat64(t.Pos+UOffsetT(off), n) + return true + } + + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 40d14725291..477fe6a016c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -422,6 +422,9 @@ github.com/golang/protobuf/ptypes/wrappers github.com/golang/snappy # github.com/google/btree v1.0.0 github.com/google/btree +# github.com/google/flatbuffers v2.0.0+incompatible +## explicit +github.com/google/flatbuffers/go # github.com/google/go-cmp v0.5.6 ## explicit github.com/google/go-cmp/cmp @@ 
-702,6 +705,7 @@ github.com/oklog/ulid ## explicit github.com/olekukonko/tablewriter # github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e +## explicit github.com/opentracing-contrib/go-grpc # github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing-contrib/go-stdlib/nethttp
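
The vendored encode.go above implements FlatBuffers' fixed-width little-endian scalar codecs by hand, using the shift-and-or form with a leading index expression to hoist the bounds check (see golang.org/issue/14808). A minimal round-trip sketch, outside the diff and using only the exported helpers shown above:

```go
package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

func main() {
	buf := make([]byte, flatbuffers.SizeUint32)

	// WriteUint32 emits the value least-significant byte first.
	flatbuffers.WriteUint32(buf, 0xDEADBEEF)
	fmt.Printf("% X\n", buf) // EF BE AD DE

	// GetUint32 reassembles the bytes, so the pair round-trips exactly.
	fmt.Printf("%#x\n", flatbuffers.GetUint32(buf)) // 0xdeadbeef
}
```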
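Table.Offset, the *Slot getters, and the Mutate* helpers above are easiest to see against a concrete buffer. The sketch below hand-assembles the smallest valid FlatBuffer (a root table with a single int32 field) with the Write* helpers from encode.go, then reads and mutates it through GetRootAs from lib.go. The layout is a worked example of the wire format, not anything generated from Tempo's tempo.fbs schema:

```go
package main

import (
	"fmt"

	flatbuffers "github.com/google/flatbuffers/go"
)

// Root is a minimal hand-written stand-in for a flatc-generated table
// type; it only needs to satisfy the FlatBuffer interface from lib.go.
type Root struct {
	tab flatbuffers.Table
}

func (r *Root) Init(buf []byte, i flatbuffers.UOffsetT) {
	r.tab.Bytes = buf
	r.tab.Pos = i
}

func (r *Root) Table() flatbuffers.Table { return r.tab }

func main() {
	// Smallest useful buffer: a root table with one int32 field = 42.
	//   [0:4]   root UOffsetT -> table at byte 12
	//   [4:6]   vtable length        = 6  (two metadata slots + one field slot)
	//   [6:8]   table inline length  = 8
	//   [8:10]  field 0 offset       = 4  (relative to table start)
	//   [10:12] padding
	//   [12:16] table's SOffsetT back to its vtable (12 - 4 = 8)
	//   [16:20] field 0 value        = 42
	buf := make([]byte, 20)
	flatbuffers.WriteUOffsetT(buf[0:], 12)
	flatbuffers.WriteVOffsetT(buf[4:], 6)
	flatbuffers.WriteVOffsetT(buf[6:], 8)
	flatbuffers.WriteVOffsetT(buf[8:], 4)
	flatbuffers.WriteSOffsetT(buf[12:], 8)
	flatbuffers.WriteInt32(buf[16:], 42)

	root := &Root{}
	flatbuffers.GetRootAs(buf, 0, root) // resolves the root UOffsetT

	// Slot 4 is the first user field; slots 0-3 hold vtable metadata.
	fmt.Println(root.tab.GetInt32Slot(4, -1)) // 42

	// A slot past the vtable's recorded length (e.g. a field added to the
	// schema after this buffer was written) falls back to the default.
	fmt.Println(root.tab.GetInt32Slot(6, -1)) // -1

	// Mutate* writes in place and reports whether the field was present.
	fmt.Println(root.tab.MutateInt32Slot(4, 43)) // true
	fmt.Println(root.tab.GetInt32Slot(4, -1))    // 43
}
```

The deprecated-field behavior falls out of Offset: a slot index that is not smaller than the stored vtable length returns 0, which every *Slot getter maps to its caller-supplied default.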
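grpc.go adapts FlatBuffers to gRPC's pluggable codec mechanism: Marshal expects a *flatbuffers.Builder whose Finish has already been called (it returns FinishedBytes), and Unmarshal hands the raw wire bytes to the message's generated Init. A hedged client-side sketch of wiring it up; the address is a placeholder, and whether Tempo's new search path negotiates this codec is outside this diff:

```go
package main

import (
	flatbuffers "github.com/google/flatbuffers/go"
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

func main() {
	// Option 1: register the codec globally; peers then select it by
	// content-subtype, i.e. grpc.CallContentSubtype(flatbuffers.Codec).
	encoding.RegisterCodec(flatbuffers.FlatbuffersCodec{})

	// Option 2: force it for every call on one connection. With this
	// codec, request values must be *flatbuffers.Builder and response
	// values must be flatbuffers-generated types (Unmarshal calls Init).
	conn, err := grpc.Dial("localhost:9095", // placeholder address
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.ForceCodec(flatbuffers.FlatbuffersCodec{})),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```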