From f32d61745379dd0b232f5c99c31aa54929324064 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 01:32:27 -0700 Subject: [PATCH 01/21] Output Aggregator.Checkpoint into another Aggregator --- sdk/export/metric/aggregator/aggregator.go | 6 ++--- sdk/export/metric/metric.go | 10 +++++--- sdk/metric/aggregator/sum/sum.go | 23 +++++++++-------- sdk/metric/aggregator/sum/sum_test.go | 30 ++++++++++++---------- 4 files changed, 37 insertions(+), 32 deletions(-) diff --git a/sdk/export/metric/aggregator/aggregator.go b/sdk/export/metric/aggregator/aggregator.go index 58176041281..0944ed7793a 100644 --- a/sdk/export/metric/aggregator/aggregator.go +++ b/sdk/export/metric/aggregator/aggregator.go @@ -112,11 +112,11 @@ var ( ErrNoData = fmt.Errorf("no data collected by this aggregator") ) -// NewInconsistentMergeError formats an error describing an attempt to +// NewInconsistentAggregatorError formats an error describing an attempt to // merge different-type aggregators. The result can be unwrapped as // an ErrInconsistentType. -func NewInconsistentMergeError(a1, a2 export.Aggregator) error { - return fmt.Errorf("cannot merge %T with %T: %w", a1, a2, ErrInconsistentType) +func NewInconsistentAggregatorError(a1, a2 export.Aggregator) error { + return fmt.Errorf("%w: %T and %T", ErrInconsistentType, a1, a2) } // RangeTest is a commmon routine for testing for valid input values. diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 6c3d49a5814..c7e8e63d873 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -73,7 +73,7 @@ type Integrator interface { // AggregationSelector supports selecting the kind of Aggregator to // use at runtime for a specific metric instrument. type AggregationSelector interface { - // AggregatorFor returns the kind of aggregator suited to the + // AggregatorFor returns two aggregators of a kind suited to the // requested export. Returning `nil` indicates to ignore this // metric instrument. 
This must return a consistent type to // avoid confusion in later stages of the metrics export @@ -83,7 +83,7 @@ type AggregationSelector interface { // Note: This is context-free because the aggregator should // not relate to the incoming context. This call should not // block. - AggregatorFor(*metric.Descriptor) Aggregator + AggregatorFor(*metric.Descriptor) [2]Aggregator } // Aggregator implements a specific aggregation behavior, e.g., a @@ -110,7 +110,9 @@ type Aggregator interface { Update(context.Context, metric.Number, *metric.Descriptor) error // Checkpoint is called during collection to finish one period - // of aggregation by atomically saving the current value. + // of aggregation by atomically saving the current value into + // the argument. + // // Checkpoint() is called concurrently with Update(). // Checkpoint should reset the current state to the empty // state, in order to begin computing a new delta for the next @@ -122,7 +124,7 @@ type Aggregator interface { // // This call has no Context argument because it is expected to // perform only computation. - Checkpoint(*metric.Descriptor) + Checkpoint(Aggregator, *metric.Descriptor) error // Merge combines the checkpointed state from the argument // aggregator into this aggregator's checkpointed state. diff --git a/sdk/metric/aggregator/sum/sum.go b/sdk/metric/aggregator/sum/sum.go index 832dce33f7c..091e572801a 100644 --- a/sdk/metric/aggregator/sum/sum.go +++ b/sdk/metric/aggregator/sum/sum.go @@ -26,11 +26,7 @@ import ( type Aggregator struct { // current holds current increments to this counter record // current needs to be aligned for 64-bit atomic operations. - current metric.Number - - // checkpoint is a temporary used during Checkpoint() - // checkpoint needs to be aligned for 64-bit atomic operations. - checkpoint metric.Number + value metric.Number } var _ export.Aggregator = &Aggregator{} @@ -46,18 +42,23 @@ func New() *Aggregator { // Sum returns the last-checkpointed sum. 
This will never return an // error. func (c *Aggregator) Sum() (metric.Number, error) { - return c.checkpoint, nil + return c.value, nil } // Checkpoint atomically saves the current value and resets the // current sum to zero. -func (c *Aggregator) Checkpoint(*metric.Descriptor) { - c.checkpoint = c.current.SwapNumberAtomic(metric.Number(0)) +func (c *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) error { + o, _ := oa.(*Aggregator) + if o == nil { + return aggregator.NewInconsistentAggregatorError(c, oa) + } + o.value = c.value.SwapNumberAtomic(metric.Number(0)) + return nil } // Update atomically adds to the current value. func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error { - c.current.AddNumberAtomic(desc.NumberKind(), number) + c.value.AddNumberAtomic(desc.NumberKind(), number) return nil } @@ -65,8 +66,8 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.NewInconsistentMergeError(c, oa) + return aggregator.NewInconsistentAggregatorError(c, oa) } - c.checkpoint.AddNumber(desc.NumberKind(), o.checkpoint) + c.value.AddNumber(desc.NumberKind(), o.value) return nil } diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index 6df2de3f3ae..a0ba38f1eed 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -32,12 +32,8 @@ const count = 100 func TestMain(m *testing.M) { fields := []ottest.FieldOffset{ { - Name: "Aggregator.current", - Offset: unsafe.Offsetof(Aggregator{}.current), - }, - { - Name: "Aggregator.checkpoint", - Offset: unsafe.Offsetof(Aggregator{}.checkpoint), + Name: "Aggregator.value", + Offset: unsafe.Offsetof(Aggregator{}.value), }, } if !ottest.Aligned8Byte(fields, os.Stderr) { @@ -50,6 +46,7 @@ func TestMain(m *testing.M) { func 
TestCounterSum(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() + ckpt := New() descriptor := test.NewAggregatorTest(metric.CounterKind, profile.NumberKind) @@ -60,9 +57,10 @@ func TestCounterSum(t *testing.T) { test.CheckedUpdate(t, agg, x, descriptor) } - agg.Checkpoint(descriptor) + err := agg.Checkpoint(ckpt, descriptor) + require.NoError(t, err) - asum, err := agg.Sum() + asum, err := ckpt.Sum() require.Equal(t, sum, asum, "Same sum - monotonic") require.Nil(t, err) }) @@ -71,6 +69,7 @@ func TestCounterSum(t *testing.T) { func TestValueRecorderSum(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() + ckpt := New() descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) @@ -85,9 +84,9 @@ func TestValueRecorderSum(t *testing.T) { sum.AddNumber(profile.NumberKind, r2) } - agg.Checkpoint(descriptor) + agg.Checkpoint(ckpt, descriptor) - asum, err := agg.Sum() + asum, err := ckpt.Sum() require.Equal(t, sum, asum, "Same sum - monotonic") require.Nil(t, err) }) @@ -98,6 +97,9 @@ func TestCounterMerge(t *testing.T) { agg1 := New() agg2 := New() + ckpt1 := New() + ckpt2 := New() + descriptor := test.NewAggregatorTest(metric.CounterKind, profile.NumberKind) sum := metric.Number(0) @@ -108,14 +110,14 @@ func TestCounterMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - agg1.Checkpoint(descriptor) - agg2.Checkpoint(descriptor) + agg1.Checkpoint(ckpt1, descriptor) + agg2.Checkpoint(ckpt2, descriptor) - test.CheckedMerge(t, agg1, agg2, descriptor) + test.CheckedMerge(t, ckpt1, ckpt2, descriptor) sum.AddNumber(descriptor.NumberKind(), sum) - asum, err := agg1.Sum() + asum, err := ckpt1.Sum() require.Equal(t, sum, asum, "Same sum - monotonic") require.Nil(t, err) }) From e248a2854d3d3198fa90e54b843bca49b5a182e8 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 11:19:01 -0700 Subject: [PATCH 02/21] Update array --- 
sdk/metric/aggregator/array/array.go | 68 +++++++------ sdk/metric/aggregator/array/array_test.go | 111 ++++++++++++---------- 2 files changed, 101 insertions(+), 78 deletions(-) diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index f3b15c564bc..437fd4e7957 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -30,11 +30,9 @@ type ( // Aggregator aggregates events that form a distribution, keeping // an array with the exact set of values. Aggregator struct { - // ckptSum needs to be aligned for 64-bit atomic operations. - ckptSum metric.Number - lock sync.Mutex - current points - checkpoint points + lock sync.Mutex + sum metric.Number + points points } points []metric.Number @@ -54,55 +52,60 @@ func New() *Aggregator { // Sum returns the sum of values in the checkpoint. func (c *Aggregator) Sum() (metric.Number, error) { - return c.ckptSum, nil + if len(c.points) == 0 { + return 0, aggregator.ErrNoData + } + return c.sum, nil } // Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { - return int64(len(c.checkpoint)), nil + if len(c.points) == 0 { + return 0, aggregator.ErrNoData + } + return int64(len(c.points)), nil } // Max returns the maximum value in the checkpoint. func (c *Aggregator) Max() (metric.Number, error) { - return c.checkpoint.Quantile(1) + return c.points.Quantile(1) } // Min returns the mininum value in the checkpoint. func (c *Aggregator) Min() (metric.Number, error) { - return c.checkpoint.Quantile(0) + return c.points.Quantile(0) } // Quantile returns the estimated quantile of data in the checkpoint. // It is an error if `q` is less than 0 or greated than 1. func (c *Aggregator) Quantile(q float64) (metric.Number, error) { - return c.checkpoint.Quantile(q) + return c.points.Quantile(q) } // Points returns access to the raw data set. 
func (c *Aggregator) Points() ([]metric.Number, error) { - return c.checkpoint, nil + return c.points, nil } // Checkpoint saves the current state and resets the current state to // the empty set, taking a lock to prevent concurrent Update() calls. -func (c *Aggregator) Checkpoint(desc *metric.Descriptor) { +func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) error { + o, _ := oa.(*Aggregator) + if o == nil { + return aggregator.NewInconsistentAggregatorError(c, oa) + } + c.lock.Lock() - c.checkpoint, c.current = c.current, nil + o.points, c.points = c.points, nil + o.sum, c.sum = c.sum, 0 c.lock.Unlock() - kind := desc.NumberKind() - // TODO: This sort should be done lazily, only when quantiles // are requested. The SDK specification says you can use this // aggregator to simply list values in the order they were // received as an alternative to requesting quantile information. - c.sort(kind) - - c.ckptSum = metric.Number(0) - - for _, v := range c.checkpoint { - c.ckptSum.AddNumber(kind, v) - } + o.sort(desc.NumberKind()) + return nil } // Update adds the recorded measurement to the current data set. @@ -110,8 +113,10 @@ func (c *Aggregator) Checkpoint(desc *metric.Descriptor) { // calls. 
func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error { c.lock.Lock() - c.current = append(c.current, number) + c.points = append(c.points, number) + c.sum.AddNumber(desc.NumberKind(), number) c.lock.Unlock() + return nil } @@ -119,21 +124,24 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.NewInconsistentMergeError(c, oa) + return aggregator.NewInconsistentAggregatorError(c, oa) } - c.ckptSum.AddNumber(desc.NumberKind(), o.ckptSum) - c.checkpoint = combine(c.checkpoint, o.checkpoint, desc.NumberKind()) + // Note: Current assumption is that `o` was checkpointed, + // therefore is already sorted. See the TODO above, since + // this is an open question. + c.sum.AddNumber(desc.NumberKind(), o.sum) + c.points = combine(c.points, o.points, desc.NumberKind()) return nil } func (c *Aggregator) sort(kind metric.NumberKind) { switch kind { case metric.Float64NumberKind: - sort.Float64s(*(*[]float64)(unsafe.Pointer(&c.checkpoint))) + sort.Float64s(*(*[]float64)(unsafe.Pointer(&c.points))) case metric.Int64NumberKind: - sort.Sort(&c.checkpoint) + sort.Sort(&c.points) default: // NOTE: This can't happen because the SDK doesn't @@ -179,11 +187,11 @@ func (p *points) Swap(i, j int) { // of a quantile. 
func (p *points) Quantile(q float64) (metric.Number, error) { if len(*p) == 0 { - return metric.Number(0), aggregator.ErrNoData + return 0, aggregator.ErrNoData } if q < 0 || q > 1 { - return metric.Number(0), aggregator.ErrInvalidQuantile + return 0, aggregator.ErrInvalidQuantile } if q == 0 || len(*p) == 1 { diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index e6ce683bf25..874ec597f98 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -17,41 +17,46 @@ package array import ( "fmt" "math" - "os" "testing" - "unsafe" + + "errors" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/api/metric" - ottest "go.opentelemetry.io/otel/internal/testing" "go.opentelemetry.io/otel/sdk/export/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) -// Ensure struct alignment prior to running tests. -func TestMain(m *testing.M) { - fields := []ottest.FieldOffset{ - { - Name: "Aggregator.ckptSum", - Offset: unsafe.Offsetof(Aggregator{}.ckptSum), - }, - } - if !ottest.Aligned8Byte(fields, os.Stderr) { - os.Exit(1) - } - - os.Exit(m.Run()) -} - type updateTest struct { count int } +func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { + kind := desc.NumberKind() + + sum, err := agg.Sum() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), sum) + + count, err := agg.Count() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, int64(0), count) + + max, err := agg.Max() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), max) + + min, err := agg.Min() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), min) +} + func (ut *updateTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) agg := New() + ckpt := New() all := 
test.NewNumbers(profile.NumberKind) @@ -65,11 +70,14 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { test.CheckedUpdate(t, agg, y, descriptor) } - agg.Checkpoint(descriptor) + err := agg.Checkpoint(ckpt, descriptor) + require.NoError(t, err) + + checkZero(t, agg, descriptor) all.Sort() - sum, err := agg.Sum() + sum, err := ckpt.Sum() require.Nil(t, err) allSum := all.Sum() require.InEpsilon(t, @@ -77,19 +85,19 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { sum.CoerceToFloat64(profile.NumberKind), 0.0000001, "Same sum") - count, err := agg.Count() + count, err := ckpt.Count() require.Nil(t, err) require.Equal(t, all.Count(), count, "Same count") - min, err := agg.Min() + min, err := ckpt.Min() require.Nil(t, err) require.Equal(t, all.Min(), min, "Same min") - max, err := agg.Max() + max, err := ckpt.Max() require.Nil(t, err) require.Equal(t, all.Max(), max, "Same max") - qx, err := agg.Quantile(0.5) + qx, err := ckpt.Quantile(0.5) require.Nil(t, err) require.Equal(t, all.Median(), qx, "Same median") } @@ -118,6 +126,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { agg1 := New() agg2 := New() + ckpt1 := New() + ckpt2 := New() all := test.NewNumbers(profile.NumberKind) @@ -141,14 +151,17 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - agg1.Checkpoint(descriptor) - agg2.Checkpoint(descriptor) + agg1.Checkpoint(ckpt1, descriptor) + agg2.Checkpoint(ckpt2, descriptor) + + checkZero(t, agg1, descriptor) + checkZero(t, agg2, descriptor) - test.CheckedMerge(t, agg1, agg2, descriptor) + test.CheckedMerge(t, ckpt1, ckpt2, descriptor) all.Sort() - sum, err := agg1.Sum() + sum, err := ckpt1.Sum() require.Nil(t, err) allSum := all.Sum() require.InEpsilon(t, @@ -156,19 +169,19 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { sum.CoerceToFloat64(profile.NumberKind), 0.0000001, "Same sum - absolute") - count, err := agg1.Count() + count, err := ckpt1.Count() require.Nil(t, err) 
require.Equal(t, all.Count(), count, "Same count - absolute") - min, err := agg1.Min() + min, err := ckpt1.Min() require.Nil(t, err) require.Equal(t, all.Min(), min, "Same min - absolute") - max, err := agg1.Max() + max, err := ckpt1.Max() require.Nil(t, err) require.Equal(t, all.Max(), max, "Same max - absolute") - qx, err := agg1.Quantile(0.5) + qx, err := ckpt1.Quantile(0.5) require.Nil(t, err) require.Equal(t, all.Median(), qx, "Same median - absolute") } @@ -196,16 +209,17 @@ func TestArrayMerge(t *testing.T) { func TestArrayErrors(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() + ckpt := New() - _, err := agg.Max() + _, err := ckpt.Max() require.Error(t, err) require.Equal(t, err, aggregator.ErrNoData) - _, err = agg.Min() + _, err = ckpt.Min() require.Error(t, err) require.Equal(t, err, aggregator.ErrNoData) - _, err = agg.Quantile(0.1) + _, err = ckpt.Quantile(0.1) require.Error(t, err) require.Equal(t, err, aggregator.ErrNoData) @@ -216,23 +230,23 @@ func TestArrayErrors(t *testing.T) { if profile.NumberKind == metric.Float64NumberKind { test.CheckedUpdate(t, agg, metric.NewFloat64Number(math.NaN()), descriptor) } - agg.Checkpoint(descriptor) + agg.Checkpoint(ckpt, descriptor) - count, err := agg.Count() + count, err := ckpt.Count() require.Equal(t, int64(1), count, "NaN value was not counted") require.Nil(t, err) - num, err := agg.Quantile(0) + num, err := ckpt.Quantile(0) require.Nil(t, err) require.Equal(t, num, metric.Number(0)) - _, err = agg.Quantile(-0.0001) + _, err = ckpt.Quantile(-0.0001) require.Error(t, err) - require.Equal(t, err, aggregator.ErrInvalidQuantile) + require.True(t, errors.Is(err, aggregator.ErrInvalidQuantile)) _, err = agg.Quantile(1.0001) require.Error(t, err) - require.Equal(t, err, aggregator.ErrInvalidQuantile) + require.True(t, errors.Is(err, aggregator.ErrNoData)) }) } @@ -270,6 +284,7 @@ func TestArrayFloat64(t *testing.T) { all := test.NewNumbers(metric.Float64NumberKind) agg 
:= New() + ckpt := New() for _, f := range fpsf(1) { all.Append(metric.NewFloat64Number(f)) @@ -281,32 +296,32 @@ func TestArrayFloat64(t *testing.T) { test.CheckedUpdate(t, agg, metric.NewFloat64Number(f), descriptor) } - agg.Checkpoint(descriptor) + agg.Checkpoint(ckpt, descriptor) all.Sort() - sum, err := agg.Sum() + sum, err := ckpt.Sum() require.Nil(t, err) allSum := all.Sum() require.InEpsilon(t, (&allSum).AsFloat64(), sum.AsFloat64(), 0.0000001, "Same sum") - count, err := agg.Count() + count, err := ckpt.Count() require.Equal(t, all.Count(), count, "Same count") require.Nil(t, err) - min, err := agg.Min() + min, err := ckpt.Min() require.Nil(t, err) require.Equal(t, all.Min(), min, "Same min") - max, err := agg.Max() + max, err := ckpt.Max() require.Nil(t, err) require.Equal(t, all.Max(), max, "Same max") - qx, err := agg.Quantile(0.5) + qx, err := ckpt.Quantile(0.5) require.Nil(t, err) require.Equal(t, all.Median(), qx, "Same median") - po, err := agg.Points() + po, err := ckpt.Points() require.Nil(t, err) require.Equal(t, all.Len(), len(po), "Points() must have same length of updates") for i := 0; i < len(po); i++ { From c49955dbeb20b77d2ea88eaa1ef2010d78168fd0 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 11:25:29 -0700 Subject: [PATCH 03/21] Update MMSC --- sdk/metric/aggregator/array/array.go | 6 -- sdk/metric/aggregator/array/array_test.go | 4 +- sdk/metric/aggregator/minmaxsumcount/mmsc.go | 88 +++++++++---------- .../aggregator/minmaxsumcount/mmsc_test.go | 63 +++++++++---- 4 files changed, 89 insertions(+), 72 deletions(-) diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 437fd4e7957..deeaf060c0f 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -52,17 +52,11 @@ func New() *Aggregator { // Sum returns the sum of values in the checkpoint. 
func (c *Aggregator) Sum() (metric.Number, error) { - if len(c.points) == 0 { - return 0, aggregator.ErrNoData - } return c.sum, nil } // Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { - if len(c.points) == 0 { - return 0, aggregator.ErrNoData - } return int64(len(c.points)), nil } diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 874ec597f98..2e782af5c26 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -36,11 +36,11 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { kind := desc.NumberKind() sum, err := agg.Sum() - require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.NoError(t, err) require.Equal(t, kind.Zero(), sum) count, err := agg.Count() - require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.NoError(t, err) require.Equal(t, int64(0), count) max, err := agg.Max() diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc.go b/sdk/metric/aggregator/minmaxsumcount/mmsc.go index f72e8e3ae36..edacfa2a596 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc.go @@ -27,21 +27,20 @@ type ( // Aggregator aggregates events that form a distribution, // keeping only the min, max, sum, and count. 
Aggregator struct { - lock sync.Mutex - current state - checkpoint state - kind metric.NumberKind + lock sync.Mutex + kind metric.NumberKind + state } state struct { - count metric.Number + count int64 sum metric.Number min metric.Number max metric.Number } ) -var _ export.Aggregator = &Aggregator{} +var w_ export.Aggregator = &Aggregator{} var _ aggregator.MinMaxSumCount = &Aggregator{} // New returns a new aggregator for computing the min, max, sum, and @@ -52,67 +51,60 @@ var _ aggregator.MinMaxSumCount = &Aggregator{} func New(desc *metric.Descriptor) *Aggregator { kind := desc.NumberKind() return &Aggregator{ - kind: kind, - current: state{ - count: metric.NewUint64Number(0), - sum: kind.Zero(), - min: kind.Maximum(), - max: kind.Minimum(), - }, + kind: kind, + state: emptyState(kind), } } // Sum returns the sum of values in the checkpoint. func (c *Aggregator) Sum() (metric.Number, error) { - c.lock.Lock() - defer c.lock.Unlock() - return c.checkpoint.sum, nil + return c.sum, nil } // Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { - c.lock.Lock() - defer c.lock.Unlock() - return c.checkpoint.count.CoerceToInt64(metric.Uint64NumberKind), nil + return c.count, nil } // Min returns the minimum value in the checkpoint. // The error value aggregator.ErrNoData will be returned // if there were no measurements recorded during the checkpoint. func (c *Aggregator) Min() (metric.Number, error) { - c.lock.Lock() - defer c.lock.Unlock() - if c.checkpoint.count.IsZero(metric.Uint64NumberKind) { - return c.kind.Zero(), aggregator.ErrNoData + if c.count == 0 { + return 0, aggregator.ErrNoData } - return c.checkpoint.min, nil + return c.min, nil } // Max returns the maximum value in the checkpoint. // The error value aggregator.ErrNoData will be returned // if there were no measurements recorded during the checkpoint. 
func (c *Aggregator) Max() (metric.Number, error) { - c.lock.Lock() - defer c.lock.Unlock() - if c.checkpoint.count.IsZero(metric.Uint64NumberKind) { - return c.kind.Zero(), aggregator.ErrNoData + if c.count == 0 { + return 0, aggregator.ErrNoData } - return c.checkpoint.max, nil + return c.max, nil } // Checkpoint saves the current state and resets the current state to // the empty set. -func (c *Aggregator) Checkpoint(desc *metric.Descriptor) { +func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) error { + o, _ := oa.(*Aggregator) + if o == nil { + return aggregator.NewInconsistentAggregatorError(c, oa) + } + c.lock.Lock() - c.checkpoint, c.current = c.current, c.emptyState() + o.state, c.state = c.state, emptyState(c.kind) c.lock.Unlock() + + return nil } -func (c *Aggregator) emptyState() state { - kind := c.kind +func emptyState(kind metric.NumberKind) state { return state{ - count: metric.NewUint64Number(0), - sum: kind.Zero(), + count: 0, + sum: 0, min: kind.Maximum(), max: kind.Minimum(), } @@ -124,13 +116,13 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri c.lock.Lock() defer c.lock.Unlock() - c.current.count.AddInt64(1) - c.current.sum.AddNumber(kind, number) - if number.CompareNumber(kind, c.current.min) < 0 { - c.current.min = number + c.count++ + c.sum.AddNumber(kind, number) + if number.CompareNumber(kind, c.min) < 0 { + c.min = number } - if number.CompareNumber(kind, c.current.max) > 0 { - c.current.max = number + if number.CompareNumber(kind, c.max) > 0 { + c.max = number } return nil } @@ -139,17 +131,17 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.NewInconsistentMergeError(c, oa) + return aggregator.NewInconsistentAggregatorError(c, oa) } - c.checkpoint.count.AddNumber(metric.Uint64NumberKind, 
o.checkpoint.count) - c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum) + c.count += o.count + c.sum.AddNumber(desc.NumberKind(), o.sum) - if c.checkpoint.min.CompareNumber(desc.NumberKind(), o.checkpoint.min) > 0 { - c.checkpoint.min.SetNumber(o.checkpoint.min) + if c.min.CompareNumber(desc.NumberKind(), o.min) > 0 { + c.min.SetNumber(o.min) } - if c.checkpoint.max.CompareNumber(desc.NumberKind(), o.checkpoint.max) < 0 { - c.checkpoint.max.SetNumber(o.checkpoint.max) + if c.max.CompareNumber(desc.NumberKind(), o.max) < 0 { + c.max.SetNumber(o.max) } return nil } diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go index 7e9325b8c75..d65dd2e595a 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go @@ -15,6 +15,7 @@ package minmaxsumcount import ( + "errors" "math" "math/rand" "testing" @@ -75,11 +76,32 @@ func TestMinMaxSumCountPositiveAndNegative(t *testing.T) { }) } +func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { + kind := desc.NumberKind() + + sum, err := agg.Sum() + require.NoError(t, err) + require.Equal(t, kind.Zero(), sum) + + count, err := agg.Count() + require.NoError(t, err) + require.Equal(t, int64(0), count) + + max, err := agg.Max() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), max) + + min, err := agg.Min() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), min) +} + // Validates min, max, sum and count for a given profile and policy func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) agg := New(descriptor) + ckpt := New(descriptor) all := test.NewNumbers(profile.NumberKind) @@ -89,11 +111,13 @@ func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, 
descriptor) } - agg.Checkpoint(descriptor) + agg.Checkpoint(ckpt, descriptor) + + checkZero(t, agg, descriptor) all.Sort() - aggSum, err := agg.Sum() + aggSum, err := ckpt.Sum() require.Nil(t, err) allSum := all.Sum() require.InEpsilon(t, @@ -102,18 +126,18 @@ func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { 0.000000001, "Same sum - "+policy.name) - count, err := agg.Count() + count, err := ckpt.Count() require.Equal(t, all.Count(), count, "Same count -"+policy.name) require.Nil(t, err) - min, err := agg.Min() + min, err := ckpt.Min() require.Nil(t, err) require.Equal(t, all.Min(), min, "Same min -"+policy.name) - max, err := agg.Max() + max, err := ckpt.Max() require.Nil(t, err) require.Equal(t, all.Max(), @@ -127,6 +151,8 @@ func TestMinMaxSumCountMerge(t *testing.T) { agg1 := New(descriptor) agg2 := New(descriptor) + ckpt1 := New(descriptor) + ckpt2 := New(descriptor) all := test.NewNumbers(profile.NumberKind) @@ -141,14 +167,17 @@ func TestMinMaxSumCountMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - agg1.Checkpoint(descriptor) - agg2.Checkpoint(descriptor) + agg1.Checkpoint(ckpt1, descriptor) + agg2.Checkpoint(ckpt2, descriptor) + + checkZero(t, agg1, descriptor) + checkZero(t, agg2, descriptor) - test.CheckedMerge(t, agg1, agg2, descriptor) + test.CheckedMerge(t, ckpt1, ckpt2, descriptor) all.Sort() - aggSum, err := agg1.Sum() + aggSum, err := ckpt1.Sum() require.Nil(t, err) allSum := all.Sum() require.InEpsilon(t, @@ -157,18 +186,18 @@ func TestMinMaxSumCountMerge(t *testing.T) { 0.000000001, "Same sum - absolute") - count, err := agg1.Count() + count, err := ckpt1.Count() require.Equal(t, all.Count(), count, "Same count - absolute") require.Nil(t, err) - min, err := agg1.Min() + min, err := ckpt1.Min() require.Nil(t, err) require.Equal(t, all.Min(), min, "Same min - absolute") - max, err := agg1.Max() + max, err := ckpt1.Max() require.Nil(t, err) require.Equal(t, all.Max(), @@ -182,17 +211,19 @@ func 
TestMaxSumCountNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) agg := New(descriptor) - agg.Checkpoint(descriptor) + ckpt := New(descriptor) + + agg.Checkpoint(ckpt, descriptor) - asum, err := agg.Sum() + asum, err := ckpt.Sum() require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") require.Nil(t, err) - count, err := agg.Count() + count, err := ckpt.Count() require.Equal(t, int64(0), count, "Empty checkpoint count = 0") require.Nil(t, err) - max, err := agg.Max() + max, err := ckpt.Max() require.Equal(t, aggregator.ErrNoData, err) require.Equal(t, metric.Number(0), max) }) From db7948da4295877c2a914bf1baf443d9a6c067aa Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 14:00:32 -0700 Subject: [PATCH 04/21] Update LastValue --- sdk/metric/aggregator/lastvalue/lastvalue.go | 33 +++++++-------- .../aggregator/lastvalue/lastvalue_test.go | 40 +++++++++++++------ sdk/metric/aggregator/sum/sum_test.go | 14 +++++++ sdk/metric/aggregator/test/test.go | 18 --------- 4 files changed, 58 insertions(+), 47 deletions(-) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue.go b/sdk/metric/aggregator/lastvalue/lastvalue.go index 8c2f74cb31c..41775e0028f 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue.go @@ -29,11 +29,8 @@ type ( // Aggregator aggregates lastValue events. Aggregator struct { - // current is an atomic pointer to *lastValueData. It is never nil. - current unsafe.Pointer - - // checkpoint is a copy of the current value taken in Checkpoint() - checkpoint unsafe.Pointer + // value is an atomic pointer to *lastValueData. It is never nil. + value unsafe.Pointer } // lastValueData stores the current value of a lastValue along with @@ -62,8 +59,7 @@ var unsetLastValue = &lastValueData{} // last value and timestamp that were recorded. 
func New() *Aggregator { return &Aggregator{ - current: unsafe.Pointer(unsetLastValue), - checkpoint: unsafe.Pointer(unsetLastValue), + value: unsafe.Pointer(unsetLastValue), } } @@ -72,16 +68,21 @@ func New() *Aggregator { // will be returned if (due to a race condition) the checkpoint was // computed before the first value was set. func (g *Aggregator) LastValue() (metric.Number, time.Time, error) { - gd := (*lastValueData)(g.checkpoint) + gd := (*lastValueData)(g.value) if gd == unsetLastValue { - return metric.Number(0), time.Time{}, aggregator.ErrNoData + return 0, time.Time{}, aggregator.ErrNoData } return gd.value.AsNumber(), gd.timestamp, nil } // Checkpoint atomically saves the current value. -func (g *Aggregator) Checkpoint(*metric.Descriptor) { - g.checkpoint = atomic.LoadPointer(&g.current) +func (g *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) error { + o, _ := oa.(*Aggregator) + if o == nil { + return aggregator.NewInconsistentAggregatorError(g, oa) + } + o.value = atomic.SwapPointer(&g.value, unsafe.Pointer(unsetLastValue)) + return nil } // Update atomically sets the current "last" value. 
@@ -90,7 +91,7 @@ func (g *Aggregator) Update(_ context.Context, number metric.Number, desc *metri value: number, timestamp: time.Now(), } - atomic.StorePointer(&g.current, unsafe.Pointer(ngd)) + atomic.StorePointer(&g.value, unsafe.Pointer(ngd)) return nil } @@ -99,16 +100,16 @@ func (g *Aggregator) Update(_ context.Context, number metric.Number, desc *metri func (g *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.NewInconsistentMergeError(g, oa) + return aggregator.NewInconsistentAggregatorError(g, oa) } - ggd := (*lastValueData)(atomic.LoadPointer(&g.checkpoint)) - ogd := (*lastValueData)(atomic.LoadPointer(&o.checkpoint)) + ggd := (*lastValueData)(atomic.LoadPointer(&g.value)) + ogd := (*lastValueData)(atomic.LoadPointer(&o.value)) if ggd.timestamp.After(ogd.timestamp) { return nil } - g.checkpoint = unsafe.Pointer(ogd) + g.value = unsafe.Pointer(ogd) return nil } diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go index d706927dddc..d3c4a71a97b 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue_test.go @@ -15,9 +15,11 @@ package lastvalue import ( + "errors" "math/rand" "os" "testing" + "time" "unsafe" "github.com/stretchr/testify/require" @@ -48,9 +50,17 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func checkZero(t *testing.T, agg *Aggregator) { + lv, ts, err := agg.LastValue() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, time.Time{}, ts) + require.Equal(t, metric.Number(0), lv) +} + func TestLastValueUpdate(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() + ckpt := New() record := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind) @@ -61,9 +71,10 @@ func TestLastValueUpdate(t *testing.T) { test.CheckedUpdate(t, agg, x, record) } - agg.Checkpoint(record) + 
err := agg.Checkpoint(ckpt, record) + require.NoError(t, err) - lv, _, err := agg.LastValue() + lv, _, err := ckpt.LastValue() require.Equal(t, last, lv, "Same last value - non-monotonic") require.Nil(t, err) }) @@ -73,6 +84,8 @@ func TestLastValueMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg1 := New() agg2 := New() + ckpt1 := New() + ckpt2 := New() descriptor := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind) @@ -83,18 +96,21 @@ func TestLastValueMerge(t *testing.T) { test.CheckedUpdate(t, agg1, first1, descriptor) test.CheckedUpdate(t, agg2, first2, descriptor) - agg1.Checkpoint(descriptor) - agg2.Checkpoint(descriptor) + _ = agg1.Checkpoint(ckpt1, descriptor) + _ = agg2.Checkpoint(ckpt2, descriptor) + + checkZero(t, agg1) + checkZero(t, agg2) - _, t1, err := agg1.LastValue() + _, t1, err := ckpt1.LastValue() require.Nil(t, err) - _, t2, err := agg2.LastValue() + _, t2, err := ckpt2.LastValue() require.Nil(t, err) require.True(t, t1.Before(t2)) - test.CheckedMerge(t, agg1, agg2, descriptor) + test.CheckedMerge(t, ckpt1, ckpt2, descriptor) - lv, ts, err := agg1.LastValue() + lv, ts, err := ckpt1.LastValue() require.Nil(t, err) require.Equal(t, t2, ts, "Merged timestamp - non-monotonic") require.Equal(t, first2, lv, "Merged value - non-monotonic") @@ -105,10 +121,8 @@ func TestLastValueNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueObserverKind, metric.Int64NumberKind) g := New() - g.Checkpoint(descriptor) + ckpt := New() + g.Checkpoint(ckpt, descriptor) - value, timestamp, err := g.LastValue() - require.Equal(t, aggregator.ErrNoData, err) - require.True(t, timestamp.IsZero()) - require.Equal(t, metric.Number(0), value) + checkZero(t, g) } diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index a0ba38f1eed..db00a99d40e 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -43,6 +43,14 @@ func 
TestMain(m *testing.M) { os.Exit(m.Run()) } +func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { + kind := desc.NumberKind() + + sum, err := agg.Sum() + require.NoError(t, err) + require.Equal(t, kind.Zero(), sum) +} + func TestCounterSum(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() @@ -60,6 +68,8 @@ func TestCounterSum(t *testing.T) { err := agg.Checkpoint(ckpt, descriptor) require.NoError(t, err) + checkZero(t, agg, descriptor) + asum, err := ckpt.Sum() require.Equal(t, sum, asum, "Same sum - monotonic") require.Nil(t, err) @@ -85,6 +95,7 @@ func TestValueRecorderSum(t *testing.T) { } agg.Checkpoint(ckpt, descriptor) + checkZero(t, agg, descriptor) asum, err := ckpt.Sum() require.Equal(t, sum, asum, "Same sum - monotonic") @@ -113,6 +124,9 @@ func TestCounterMerge(t *testing.T) { agg1.Checkpoint(ckpt1, descriptor) agg2.Checkpoint(ckpt2, descriptor) + checkZero(t, agg1, descriptor) + checkZero(t, agg2, descriptor) + test.CheckedMerge(t, ckpt1, ckpt2, descriptor) sum.AddNumber(descriptor.NumberKind(), sum) diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 53cf31959c1..c27d8e24174 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -17,13 +17,10 @@ package test import ( "context" "math/rand" - "os" "sort" "testing" - "unsafe" "go.opentelemetry.io/otel/api/metric" - ottest "go.opentelemetry.io/otel/internal/testing" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) @@ -66,21 +63,6 @@ func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { } } -// Ensure local struct alignment prior to running tests. 
-func TestMain(m *testing.M) { - fields := []ottest.FieldOffset{ - { - Name: "Numbers.numbers", - Offset: unsafe.Offsetof(Numbers{}.numbers), - }, - } - if !ottest.Aligned8Byte(fields, os.Stderr) { - os.Exit(1) - } - - os.Exit(m.Run()) -} - // TODO: Expose Numbers in api/metric for sorting support type Numbers struct { From b47878081a770e00ee121fe78b444747aa1cd45b Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 15:18:48 -0700 Subject: [PATCH 05/21] Finish the aggregators --- sdk/metric/aggregator/ddsketch/ddsketch.go | 47 +++++++------- .../aggregator/ddsketch/ddsketch_test.go | 59 +++++++++++++---- sdk/metric/aggregator/histogram/histogram.go | 44 +++++++------ .../aggregator/histogram/histogram_test.go | 63 +++++++++++-------- 4 files changed, 129 insertions(+), 84 deletions(-) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 32069c03136..b9aaf836845 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -31,11 +31,10 @@ type Config = sdk.Config // Aggregator aggregates events into a distribution. type Aggregator struct { - lock sync.Mutex - cfg *Config - kind metric.NumberKind - current *sdk.DDSketch - checkpoint *sdk.DDSketch + lock sync.Mutex + cfg *Config + kind metric.NumberKind + sketch *sdk.DDSketch } var _ export.Aggregator = &Aggregator{} @@ -45,30 +44,25 @@ var _ aggregator.Distribution = &Aggregator{} // New returns a new DDSketch aggregator. func New(desc *metric.Descriptor, cfg *Config) *Aggregator { return &Aggregator{ - cfg: cfg, - kind: desc.NumberKind(), - current: sdk.NewDDSketch(cfg), - checkpoint: sdk.NewDDSketch(cfg), + cfg: cfg, + kind: desc.NumberKind(), + sketch: sdk.NewDDSketch(cfg), } } // NewDefaultConfig returns a new, default DDSketch config. -// -// TODO: Should the Config constructor set minValue to -Inf to -// when the descriptor has absolute=false? 
This requires providing -// values for alpha and maxNumBins, apparently. func NewDefaultConfig() *Config { return sdk.NewDefaultConfig() } // Sum returns the sum of values in the checkpoint. func (c *Aggregator) Sum() (metric.Number, error) { - return c.toNumber(c.checkpoint.Sum()), nil + return c.toNumber(c.sketch.Sum()), nil } // Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { - return c.checkpoint.Count(), nil + return c.sketch.Count(), nil } // Max returns the maximum value in the checkpoint. @@ -84,10 +78,10 @@ func (c *Aggregator) Min() (metric.Number, error) { // Quantile returns the estimated quantile of data in the checkpoint. // It is an error if `q` is less than 0 or greated than 1. func (c *Aggregator) Quantile(q float64) (metric.Number, error) { - if c.checkpoint.Count() == 0 { + if c.sketch.Count() == 0 { return metric.Number(0), aggregator.ErrNoData } - f := c.checkpoint.Quantile(q) + f := c.sketch.Quantile(q) if math.IsNaN(f) { return metric.Number(0), aggregator.ErrInvalidQuantile } @@ -102,14 +96,19 @@ func (c *Aggregator) toNumber(f float64) metric.Number { } // Checkpoint saves the current state and resets the current state to -// the empty set, taking a lock to prevent concurrent Update() calls. -func (c *Aggregator) Checkpoint(*metric.Descriptor) { +// a new sketch, taking a lock to prevent concurrent Update() calls. +func (c *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) error { + o, _ := oa.(*Aggregator) + if o == nil { + return aggregator.NewInconsistentAggregatorError(c, oa) + } replace := sdk.NewDDSketch(c.cfg) c.lock.Lock() - c.checkpoint = c.current - c.current = replace + o.sketch, c.sketch = c.sketch, replace c.lock.Unlock() + + return nil } // Update adds the recorded measurement to the current data set. 
@@ -118,7 +117,7 @@ func (c *Aggregator) Checkpoint(*metric.Descriptor) { func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error { c.lock.Lock() defer c.lock.Unlock() - c.current.Add(number.CoerceToFloat64(desc.NumberKind())) + c.sketch.Add(number.CoerceToFloat64(desc.NumberKind())) return nil } @@ -126,9 +125,9 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri func (c *Aggregator) Merge(oa export.Aggregator, d *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.NewInconsistentMergeError(c, oa) + return aggregator.NewInconsistentAggregatorError(c, oa) } - c.checkpoint.Merge(o.checkpoint) + c.sketch.Merge(o.sketch) return nil } diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 76fdcdd3d4f..21447dc2e95 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -15,12 +15,14 @@ package ddsketch import ( + "errors" "fmt" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/api/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) @@ -29,9 +31,34 @@ const count = 1000 type updateTest struct { } +func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { + kind := desc.NumberKind() + + sum, err := agg.Sum() + require.NoError(t, err) + require.Equal(t, kind.Zero(), sum) + + count, err := agg.Count() + require.NoError(t, err) + require.Equal(t, int64(0), count) + + max, err := agg.Max() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), max) + + median, err := agg.Quantile(0.5) + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), median) + + min, err := agg.Min() + require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.Equal(t, kind.Zero(), min) +} + func 
(ut *updateTest) run(t *testing.T, profile test.Profile) {
 	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 	agg := New(descriptor, NewDefaultConfig())
+	ckpt := New(descriptor, NewDefaultConfig())
 
 	all := test.NewNumbers(profile.NumberKind)
 	for i := 0; i < count; i++ {
@@ -44,11 +71,14 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) {
 		test.CheckedUpdate(t, agg, y, descriptor)
 	}
 
-	agg.Checkpoint(descriptor)
+	err := agg.Checkpoint(ckpt, descriptor)
+	require.NoError(t, err)
+
+	checkZero(t, agg, descriptor)
 
 	all.Sort()
 
-	sum, err := agg.Sum()
+	sum, err := ckpt.Sum()
 	require.Nil(t, err)
 	allSum := all.Sum()
 	require.InDelta(t,
@@ -57,18 +87,18 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) {
 		1,
 		"Same sum")
 
-	count, err := agg.Count()
+	count, err := ckpt.Count()
 	require.Equal(t, all.Count(), count, "Same count")
 	require.Nil(t, err)
 
-	max, err := agg.Max()
+	max, err := ckpt.Max()
 	require.Nil(t, err)
 	require.Equal(t,
 		all.Max(),
 		max,
 		"Same max")
 
-	median, err := agg.Quantile(0.5)
+	median, err := ckpt.Quantile(0.5)
 	require.Nil(t, err)
 	allMedian := all.Median()
 	require.InDelta(t,
@@ -92,6 +122,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
 
 	agg1 := New(descriptor, NewDefaultConfig())
 	agg2 := New(descriptor, NewDefaultConfig())
+	ckpt1 := New(descriptor, NewDefaultConfig())
+	ckpt2 := New(descriptor, NewDefaultConfig())
 
 	all := test.NewNumbers(profile.NumberKind)
 	for i := 0; i < count; i++ {
@@ -118,14 +150,17 @@
 		}
 	}
 
-	agg1.Checkpoint(descriptor)
-	agg2.Checkpoint(descriptor)
+	_ = agg1.Checkpoint(ckpt1, descriptor)
+	_ = agg2.Checkpoint(ckpt2, descriptor)
+
+	checkZero(t, agg1, descriptor)
+	checkZero(t, agg2, descriptor)
 
-	test.CheckedMerge(t, agg1, agg2, descriptor)
+	test.CheckedMerge(t, ckpt1, ckpt2, descriptor)
 
 	all.Sort()
 
-	aggSum, err := agg1.Sum()
+	aggSum, err := ckpt1.Sum()
 	require.Nil(t, err)
 	allSum := all.Sum()
require.InDelta(t, @@ -134,18 +169,18 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { 1, "Same sum") - count, err := agg1.Count() + count, err := ckpt1.Count() require.Equal(t, all.Count(), count, "Same count") require.Nil(t, err) - max, err := agg1.Max() + max, err := ckpt1.Max() require.Nil(t, err) require.Equal(t, all.Max(), max, "Same max") - median, err := agg1.Quantile(0.5) + median, err := ckpt1.Quantile(0.5) require.Nil(t, err) allMedian := all.Median() require.InDelta(t, diff --git a/sdk/metric/aggregator/histogram/histogram.go b/sdk/metric/aggregator/histogram/histogram.go index 11aaaf229a0..4cd9b933f3f 100644 --- a/sdk/metric/aggregator/histogram/histogram.go +++ b/sdk/metric/aggregator/histogram/histogram.go @@ -34,10 +34,9 @@ type ( // It also calculates the sum and count of all events. Aggregator struct { lock sync.Mutex - current state - checkpoint state boundaries []float64 kind metric.NumberKind + state } // state represents the state of a histogram, consisting of @@ -74,32 +73,25 @@ func New(desc *metric.Descriptor, boundaries []float64) *Aggregator { return &Aggregator{ kind: desc.NumberKind(), boundaries: sortedBoundaries, - current: emptyState(sortedBoundaries), - checkpoint: emptyState(sortedBoundaries), + state: emptyState(sortedBoundaries), } } // Sum returns the sum of all values in the checkpoint. func (c *Aggregator) Sum() (metric.Number, error) { - c.lock.Lock() - defer c.lock.Unlock() - return c.checkpoint.sum, nil + return c.sum, nil } // Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { - c.lock.Lock() - defer c.lock.Unlock() - return int64(c.checkpoint.count), nil + return int64(c.count), nil } // Histogram returns the count of events in pre-determined buckets. 
func (c *Aggregator) Histogram() (aggregator.Buckets, error) { - c.lock.Lock() - defer c.lock.Unlock() return aggregator.Buckets{ Boundaries: c.boundaries, - Counts: c.checkpoint.bucketCounts, + Counts: c.bucketCounts, }, nil } @@ -107,10 +99,16 @@ func (c *Aggregator) Histogram() (aggregator.Buckets, error) { // the empty set. Since no locks are taken, there is a chance that // the independent Sum, Count and Bucket Count are not consistent with each // other. -func (c *Aggregator) Checkpoint(desc *metric.Descriptor) { +func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) error { + o, _ := oa.(*Aggregator) + if o == nil { + return aggregator.NewInconsistentAggregatorError(c, oa) + } + c.lock.Lock() - c.checkpoint, c.current = c.current, emptyState(c.boundaries) + o.state, c.state = c.state, emptyState(c.boundaries) c.lock.Unlock() + return nil } func emptyState(boundaries []float64) state { @@ -146,9 +144,9 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri c.lock.Lock() defer c.lock.Unlock() - c.current.count.AddInt64(1) - c.current.sum.AddNumber(kind, number) - c.current.bucketCounts[bucketID]++ + c.count.AddInt64(1) + c.sum.AddNumber(kind, number) + c.bucketCounts[bucketID]++ return nil } @@ -157,14 +155,14 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.NewInconsistentMergeError(c, oa) + return aggregator.NewInconsistentAggregatorError(c, oa) } - c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum) - c.checkpoint.count.AddNumber(metric.Uint64NumberKind, o.checkpoint.count) + c.sum.AddNumber(desc.NumberKind(), o.sum) + c.count.AddNumber(metric.Uint64NumberKind, o.count) - for i := 0; i < len(c.checkpoint.bucketCounts); i++ { - c.checkpoint.bucketCounts[i] += o.checkpoint.bucketCounts[i] + for i := 0; i < 
len(c.bucketCounts); i++ { + c.bucketCounts[i] += o.bucketCounts[i] } return nil } diff --git a/sdk/metric/aggregator/histogram/histogram_test.go b/sdk/metric/aggregator/histogram/histogram_test.go index c9e32ed43bc..e8545fcb34c 100644 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ b/sdk/metric/aggregator/histogram/histogram_test.go @@ -60,6 +60,25 @@ var ( boundaries = []float64{500, 250, 750} ) +func checkZero(t *testing.T, agg *histogram.Aggregator, desc *metric.Descriptor) { + asum, err := agg.Sum() + require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") + require.NoError(t, err) + + count, err := agg.Count() + require.Equal(t, int64(0), count, "Empty checkpoint count = 0") + require.NoError(t, err) + + buckets, err := agg.Histogram() + require.NoError(t, err) + + require.Equal(t, len(buckets.Counts), len(boundaries)+1, "There should be b + 1 counts, where b is the number of boundaries") + for i, bCount := range buckets.Counts { + require.Equal(t, uint64(0), uint64(bCount), "Bucket #%d must have 0 observed values", i) + } + +} + func TestHistogramAbsolute(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { testHistogram(t, profile, positiveOnly) @@ -83,6 +102,7 @@ func testHistogram(t *testing.T, profile test.Profile, policy policy) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) agg := histogram.New(descriptor, boundaries) + ckpt := histogram.New(descriptor, boundaries) all := test.NewNumbers(profile.NumberKind) @@ -92,11 +112,13 @@ func testHistogram(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - agg.Checkpoint(descriptor) + agg.Checkpoint(ckpt, descriptor) + + checkZero(t, agg, descriptor) all.Sort() - asum, err := agg.Sum() + asum, err := ckpt.Sum() sum := all.Sum() require.InEpsilon(t, sum.CoerceToFloat64(profile.NumberKind), @@ -105,11 +127,11 @@ func testHistogram(t *testing.T, profile test.Profile, policy 
policy) { "Same sum - "+policy.name) require.NoError(t, err) - count, err := agg.Count() + count, err := ckpt.Count() require.Equal(t, all.Count(), count, "Same count -"+policy.name) require.NoError(t, err) - buckets, err := agg.Histogram() + buckets, err := ckpt.Histogram() require.NoError(t, err) require.Equal(t, len(buckets.Counts), len(boundaries)+1, "There should be b + 1 counts, where b is the number of boundaries") @@ -140,6 +162,8 @@ func TestHistogramMerge(t *testing.T) { agg1 := histogram.New(descriptor, boundaries) agg2 := histogram.New(descriptor, boundaries) + ckpt1 := histogram.New(descriptor, boundaries) + ckpt2 := histogram.New(descriptor, boundaries) all := test.NewNumbers(profile.NumberKind) @@ -154,14 +178,14 @@ func TestHistogramMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - agg1.Checkpoint(descriptor) - agg2.Checkpoint(descriptor) + agg1.Checkpoint(ckpt1, descriptor) + agg2.Checkpoint(ckpt2, descriptor) - test.CheckedMerge(t, agg1, agg2, descriptor) + test.CheckedMerge(t, ckpt1, ckpt2, descriptor) all.Sort() - asum, err := agg1.Sum() + asum, err := ckpt1.Sum() sum := all.Sum() require.InEpsilon(t, sum.CoerceToFloat64(profile.NumberKind), @@ -170,11 +194,11 @@ func TestHistogramMerge(t *testing.T) { "Same sum - absolute") require.NoError(t, err) - count, err := agg1.Count() + count, err := ckpt1.Count() require.Equal(t, all.Count(), count, "Same count - absolute") require.NoError(t, err) - buckets, err := agg1.Histogram() + buckets, err := ckpt1.Histogram() require.NoError(t, err) require.Equal(t, len(buckets.Counts), len(boundaries)+1, "There should be b + 1 counts, where b is the number of boundaries") @@ -192,23 +216,12 @@ func TestHistogramNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) agg := histogram.New(descriptor, boundaries) - agg.Checkpoint(descriptor) - - asum, err := agg.Sum() - require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") + ckpt 
:= histogram.New(descriptor, boundaries) + err := agg.Checkpoint(ckpt, descriptor) require.NoError(t, err) - count, err := agg.Count() - require.Equal(t, int64(0), count, "Empty checkpoint count = 0") - require.NoError(t, err) - - buckets, err := agg.Histogram() - require.NoError(t, err) - - require.Equal(t, len(buckets.Counts), len(boundaries)+1, "There should be b + 1 counts, where b is the number of boundaries") - for i, bCount := range buckets.Counts { - require.Equal(t, uint64(0), uint64(bCount), "Bucket #%d must have 0 observed values", i) - } + checkZero(t, agg, descriptor) + checkZero(t, ckpt, descriptor) }) } From d7badfd301844d9c5dca8fd14b7bb7e8800de826 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 16:08:37 -0700 Subject: [PATCH 06/21] Fix the integrator --- sdk/export/metric/metric.go | 20 ++- sdk/metric/integrator/simple/simple.go | 2 +- sdk/metric/integrator/simple/simple_test.go | 172 ++++++++++++++------ sdk/metric/integrator/test/test.go | 107 +----------- 4 files changed, 145 insertions(+), 156 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index c7e8e63d873..4b4b65fdbb4 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -73,17 +73,23 @@ type Integrator interface { // AggregationSelector supports selecting the kind of Aggregator to // use at runtime for a specific metric instrument. type AggregationSelector interface { - // AggregatorFor returns two aggregators of a kind suited to the - // requested export. Returning `nil` indicates to ignore this - // metric instrument. This must return a consistent type to - // avoid confusion in later stages of the metrics export - // process, i.e., when Merging multiple aggregators for a - // specific instrument. + // AggregatorFor allocates a variable number of aggregators of + // a kind suitable for the requested export. This method + // supports a variable-number of allocations to support making + // a single allocation. 
+ // + // When the call returns without initializing the *Aggregator + // to a non-nil value, the metric instrument is explicitly + // disabled. + // + // This must return a consistent type to avoid confusion in + // later stages of the metrics export process, i.e., when + // Merging multiple aggregators for a specific instrument. // // Note: This is context-free because the aggregator should // not relate to the incoming context. This call should not // block. - AggregatorFor(*metric.Descriptor) [2]Aggregator + AggregatorFor(*metric.Descriptor, ...*Aggregator) } // Aggregator implements a specific aggregation behavior, e.g., a diff --git a/sdk/metric/integrator/simple/simple.go b/sdk/metric/integrator/simple/simple.go index 178c21fe043..20b2fb916c4 100644 --- a/sdk/metric/integrator/simple/simple.go +++ b/sdk/metric/integrator/simple/simple.go @@ -89,7 +89,7 @@ func (b *Integrator) Process(record export.Record) error { tmp := agg // Note: the call to AggregatorFor() followed by Merge // is effectively a Clone() operation. 
- agg = b.AggregatorFor(desc) + b.AggregatorFor(desc, &agg) if err := agg.Merge(tmp, desc); err != nil { return err } diff --git a/sdk/metric/integrator/simple/simple_test.go b/sdk/metric/integrator/simple/simple_test.go index eecbf7cc7ed..0f09d52b527 100644 --- a/sdk/metric/integrator/simple/simple_test.go +++ b/sdk/metric/integrator/simple/simple_test.go @@ -18,66 +18,133 @@ import ( "context" "testing" - "go.opentelemetry.io/otel/api/metric" - "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/kv" + "go.opentelemetry.io/otel/api/label" + "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" + "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" "go.opentelemetry.io/otel/sdk/metric/integrator/simple" "go.opentelemetry.io/otel/sdk/metric/integrator/test" + "go.opentelemetry.io/otel/sdk/resource" +) + +// Note: This var block and the helpers below will disappear in a +// future PR (see the draft in #799). The test has been completely +// rewritten there, so this code will simply be dropped. + +var ( + // Resource is applied to all test records built in this package. 
+ Resource = resource.New(kv.String("R", "V")) + + // LastValueADesc and LastValueBDesc group by "G" + LastValueADesc = metric.NewDescriptor( + "lastvalue.a", metric.ValueObserverKind, metric.Int64NumberKind) + LastValueBDesc = metric.NewDescriptor( + "lastvalue.b", metric.ValueObserverKind, metric.Int64NumberKind) + // CounterADesc and CounterBDesc group by "C" + CounterADesc = metric.NewDescriptor( + "sum.a", metric.CounterKind, metric.Int64NumberKind) + CounterBDesc = metric.NewDescriptor( + "sum.b", metric.CounterKind, metric.Int64NumberKind) + + // LastValue groups are (labels1), (labels2+labels3) + // Counter groups are (labels1+labels2), (labels3) + + // Labels1 has G=H and C=D + Labels1 = makeLabels(kv.String("G", "H"), kv.String("C", "D")) + // Labels2 has C=D and E=F + Labels2 = makeLabels(kv.String("C", "D"), kv.String("E", "F")) + // Labels3 is the empty set + Labels3 = makeLabels() ) -// These tests use the ../test label encoding. +func makeLabels(labels ...kv.KeyValue) *label.Set { + s := label.NewSet(labels...) + return &s +} + +// LastValueAgg returns a checkpointed lastValue aggregator w/ the specified descriptor and value. +func LastValueAgg(desc *metric.Descriptor, v int64) export.Aggregator { + ctx := context.Background() + gagg := lastvalue.New() + ckpt := lastvalue.New() + _ = gagg.Update(ctx, metric.NewInt64Number(v), desc) + _ = gagg.Checkpoint(ckpt, desc) + return ckpt +} + +// Convenience method for building a test exported lastValue record. +func NewLastValueRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record { + return export.NewRecord(desc, labels, Resource, LastValueAgg(desc, value)) +} + +// Convenience method for building a test exported counter record. 
+func NewCounterRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record { + return export.NewRecord(desc, labels, Resource, CounterAgg(desc, value)) +} + +// CounterAgg returns a checkpointed counter aggregator w/ the specified descriptor and value. +func CounterAgg(desc *metric.Descriptor, v int64) export.Aggregator { + ctx := context.Background() + cagg := sum.New() + ckpt := sum.New() + _ = cagg.Update(ctx, metric.NewInt64Number(v), desc) + cagg.Checkpoint(ckpt, desc) + return ckpt +} func TestSimpleStateless(t *testing.T) { b := simple.New(test.NewAggregationSelector(), false) // Set initial lastValue values - _ = b.Process(test.NewLastValueRecord(&test.LastValueADesc, test.Labels1, 10)) - _ = b.Process(test.NewLastValueRecord(&test.LastValueADesc, test.Labels2, 20)) - _ = b.Process(test.NewLastValueRecord(&test.LastValueADesc, test.Labels3, 30)) + _ = b.Process(NewLastValueRecord(&LastValueADesc, Labels1, 10)) + _ = b.Process(NewLastValueRecord(&LastValueADesc, Labels2, 20)) + _ = b.Process(NewLastValueRecord(&LastValueADesc, Labels3, 30)) - _ = b.Process(test.NewLastValueRecord(&test.LastValueBDesc, test.Labels1, 10)) - _ = b.Process(test.NewLastValueRecord(&test.LastValueBDesc, test.Labels2, 20)) - _ = b.Process(test.NewLastValueRecord(&test.LastValueBDesc, test.Labels3, 30)) + _ = b.Process(NewLastValueRecord(&LastValueBDesc, Labels1, 10)) + _ = b.Process(NewLastValueRecord(&LastValueBDesc, Labels2, 20)) + _ = b.Process(NewLastValueRecord(&LastValueBDesc, Labels3, 30)) // Another lastValue Set for Labels1 - _ = b.Process(test.NewLastValueRecord(&test.LastValueADesc, test.Labels1, 50)) - _ = b.Process(test.NewLastValueRecord(&test.LastValueBDesc, test.Labels1, 50)) + _ = b.Process(NewLastValueRecord(&LastValueADesc, Labels1, 50)) + _ = b.Process(NewLastValueRecord(&LastValueBDesc, Labels1, 50)) // Set initial counter values - _ = b.Process(test.NewCounterRecord(&test.CounterADesc, test.Labels1, 10)) - _ = 
b.Process(test.NewCounterRecord(&test.CounterADesc, test.Labels2, 20)) - _ = b.Process(test.NewCounterRecord(&test.CounterADesc, test.Labels3, 40)) + _ = b.Process(NewCounterRecord(&CounterADesc, Labels1, 10)) + _ = b.Process(NewCounterRecord(&CounterADesc, Labels2, 20)) + _ = b.Process(NewCounterRecord(&CounterADesc, Labels3, 40)) - _ = b.Process(test.NewCounterRecord(&test.CounterBDesc, test.Labels1, 10)) - _ = b.Process(test.NewCounterRecord(&test.CounterBDesc, test.Labels2, 20)) - _ = b.Process(test.NewCounterRecord(&test.CounterBDesc, test.Labels3, 40)) + _ = b.Process(NewCounterRecord(&CounterBDesc, Labels1, 10)) + _ = b.Process(NewCounterRecord(&CounterBDesc, Labels2, 20)) + _ = b.Process(NewCounterRecord(&CounterBDesc, Labels3, 40)) // Another counter Add for Labels1 - _ = b.Process(test.NewCounterRecord(&test.CounterADesc, test.Labels1, 50)) - _ = b.Process(test.NewCounterRecord(&test.CounterBDesc, test.Labels1, 50)) + _ = b.Process(NewCounterRecord(&CounterADesc, Labels1, 50)) + _ = b.Process(NewCounterRecord(&CounterBDesc, Labels1, 50)) checkpointSet := b.CheckpointSet() - records := test.NewOutput(test.SdkEncoder) + records := test.NewOutput(label.DefaultEncoder()) _ = checkpointSet.ForEach(records.AddTo) // Output lastvalue should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. 
require.EqualValues(t, map[string]float64{ - "sum.a/C~D&G~H/R~V": 60, // labels1 - "sum.a/C~D&E~F/R~V": 20, // labels2 - "sum.a//R~V": 40, // labels3 - "sum.b/C~D&G~H/R~V": 60, // labels1 - "sum.b/C~D&E~F/R~V": 20, // labels2 - "sum.b//R~V": 40, // labels3 - "lastvalue.a/C~D&G~H/R~V": 50, // labels1 - "lastvalue.a/C~D&E~F/R~V": 20, // labels2 - "lastvalue.a//R~V": 30, // labels3 - "lastvalue.b/C~D&G~H/R~V": 50, // labels1 - "lastvalue.b/C~D&E~F/R~V": 20, // labels2 - "lastvalue.b//R~V": 30, // labels3 + "sum.a/C=D,G=H/R=V": 60, // labels1 + "sum.a/C=D,E=F/R=V": 20, // labels2 + "sum.a//R=V": 40, // labels3 + "sum.b/C=D,G=H/R=V": 60, // labels1 + "sum.b/C=D,E=F/R=V": 20, // labels2 + "sum.b//R=V": 40, // labels3 + "lastvalue.a/C=D,G=H/R=V": 50, // labels1 + "lastvalue.a/C=D,E=F/R=V": 20, // labels2 + "lastvalue.a//R=V": 30, // labels3 + "lastvalue.b/C=D,G=H/R=V": 50, // labels1 + "lastvalue.b/C=D,E=F/R=V": 20, // labels2 + "lastvalue.b//R=V": 30, // labels3 }, records.Map) b.FinishedCollection() @@ -94,62 +161,67 @@ func TestSimpleStateful(t *testing.T) { ctx := context.Background() b := simple.New(test.NewAggregationSelector(), true) - counterA := test.NewCounterRecord(&test.CounterADesc, test.Labels1, 10) - caggA := counterA.Aggregator() + counterA := NewCounterRecord(&CounterADesc, Labels1, 10) _ = b.Process(counterA) - counterB := test.NewCounterRecord(&test.CounterBDesc, test.Labels1, 10) - caggB := counterB.Aggregator() + counterB := NewCounterRecord(&CounterBDesc, Labels1, 10) _ = b.Process(counterB) checkpointSet := b.CheckpointSet() b.FinishedCollection() - records1 := test.NewOutput(test.SdkEncoder) + records1 := test.NewOutput(label.DefaultEncoder()) _ = checkpointSet.ForEach(records1.AddTo) require.EqualValues(t, map[string]float64{ - "sum.a/C~D&G~H/R~V": 10, // labels1 - "sum.b/C~D&G~H/R~V": 10, // labels1 + "sum.a/C=D,G=H/R=V": 10, // labels1 + "sum.b/C=D,G=H/R=V": 10, // labels1 }, records1.Map) + caggA := sum.New() + caggB := sum.New() + ckptA := 
sum.New() + ckptB := sum.New() + // Test that state was NOT reset checkpointSet = b.CheckpointSet() - records2 := test.NewOutput(test.SdkEncoder) + records2 := test.NewOutput(label.DefaultEncoder()) _ = checkpointSet.ForEach(records2.AddTo) require.EqualValues(t, records1.Map, records2.Map) b.FinishedCollection() // Update and re-checkpoint the original record. - _ = caggA.Update(ctx, metric.NewInt64Number(20), &test.CounterADesc) - _ = caggB.Update(ctx, metric.NewInt64Number(20), &test.CounterBDesc) - caggA.Checkpoint(&test.CounterADesc) - caggB.Checkpoint(&test.CounterBDesc) + _ = caggA.Update(ctx, metric.NewInt64Number(20), &CounterADesc) + _ = caggB.Update(ctx, metric.NewInt64Number(20), &CounterBDesc) + err := caggA.Checkpoint(ckptA, &CounterADesc) + require.NoError(t, err) + err = caggB.Checkpoint(ckptB, &CounterBDesc) + require.NoError(t, err) // As yet cagg has not been passed to Integrator.Process. Should // not see an update. checkpointSet = b.CheckpointSet() - records3 := test.NewOutput(test.SdkEncoder) + records3 := test.NewOutput(label.DefaultEncoder()) _ = checkpointSet.ForEach(records3.AddTo) require.EqualValues(t, records1.Map, records3.Map) b.FinishedCollection() // Now process the second update - _ = b.Process(export.NewRecord(&test.CounterADesc, test.Labels1, test.Resource, caggA)) - _ = b.Process(export.NewRecord(&test.CounterBDesc, test.Labels1, test.Resource, caggB)) + _ = b.Process(export.NewRecord(&CounterADesc, Labels1, Resource, ckptA)) + _ = b.Process(export.NewRecord(&CounterBDesc, Labels1, Resource, ckptB)) checkpointSet = b.CheckpointSet() - records4 := test.NewOutput(test.SdkEncoder) + records4 := test.NewOutput(label.DefaultEncoder()) _ = checkpointSet.ForEach(records4.AddTo) require.EqualValues(t, map[string]float64{ - "sum.a/C~D&G~H/R~V": 30, - "sum.b/C~D&G~H/R~V": 30, + "sum.a/C=D,G=H/R=V": 30, + "sum.b/C=D,G=H/R=V": 30, }, records4.Map) b.FinishedCollection() } diff --git a/sdk/metric/integrator/test/test.go 
b/sdk/metric/integrator/test/test.go index 008cfeb1dfc..a3b4db3faa8 100644 --- a/sdk/metric/integrator/test/test.go +++ b/sdk/metric/integrator/test/test.go @@ -15,24 +15,17 @@ package test import ( - "context" "fmt" - "strings" - "go.opentelemetry.io/otel/api/kv" "go.opentelemetry.io/otel/api/label" "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/resource" ) type ( - // Encoder is an alternate label encoder to validate grouping logic. - Encoder struct{} - // Output collects distinct metric/label set outputs. Output struct { Map map[string]float64 @@ -45,39 +38,6 @@ type ( testAggregationSelector struct{} ) -var ( - // Resource is applied to all test records built in this package. - Resource = resource.New(kv.String("R", "V")) - - // LastValueADesc and LastValueBDesc group by "G" - LastValueADesc = metric.NewDescriptor( - "lastvalue.a", metric.ValueObserverKind, metric.Int64NumberKind) - LastValueBDesc = metric.NewDescriptor( - "lastvalue.b", metric.ValueObserverKind, metric.Int64NumberKind) - // CounterADesc and CounterBDesc group by "C" - CounterADesc = metric.NewDescriptor( - "sum.a", metric.CounterKind, metric.Int64NumberKind) - CounterBDesc = metric.NewDescriptor( - "sum.b", metric.CounterKind, metric.Int64NumberKind) - - // SdkEncoder uses a non-standard encoder like K1~V1&K2~V2 - SdkEncoder = &Encoder{} - // GroupEncoder uses the SDK default encoder - GroupEncoder = label.DefaultEncoder() - - // LastValue groups are (labels1), (labels2+labels3) - // Counter groups are (labels1+labels2), (labels3) - - // Labels1 has G=H and C=D - Labels1 = makeLabels(kv.String("G", "H"), kv.String("C", "D")) - // Labels2 has C=D and E=F - Labels2 = makeLabels(kv.String("C", "D"), kv.String("E", "F")) - // Labels3 is the empty set - 
Labels3 = makeLabels() - - testLabelEncoderID = label.NewEncoderID() -) - func NewOutput(labelEncoder label.Encoder) Output { return Output{ Map: make(map[string]float64), @@ -92,66 +52,17 @@ func NewAggregationSelector() export.AggregationSelector { return &testAggregationSelector{} } -func (*testAggregationSelector) AggregatorFor(desc *metric.Descriptor) export.Aggregator { - switch desc.MetricKind() { - case metric.CounterKind: - return sum.New() - case metric.ValueObserverKind: - return lastvalue.New() - default: - panic("Invalid descriptor MetricKind for this test") - } -} - -func makeLabels(labels ...kv.KeyValue) *label.Set { - s := label.NewSet(labels...) - return &s -} - -func (Encoder) Encode(iter label.Iterator) string { - var sb strings.Builder - for iter.Next() { - i, l := iter.IndexedLabel() - if i > 0 { - sb.WriteString("&") +func (*testAggregationSelector) AggregatorFor(desc *metric.Descriptor, aggPtrs ...*export.Aggregator) { + for _, aggp := range aggPtrs { + switch desc.MetricKind() { + case metric.CounterKind: + *aggp = sum.New() + case metric.ValueObserverKind: + *aggp = lastvalue.New() + default: + panic("Invalid descriptor MetricKind for this test") } - sb.WriteString(string(l.Key)) - sb.WriteString("~") - sb.WriteString(l.Value.Emit()) } - return sb.String() -} - -func (Encoder) ID() label.EncoderID { - return testLabelEncoderID -} - -// LastValueAgg returns a checkpointed lastValue aggregator w/ the specified descriptor and value. -func LastValueAgg(desc *metric.Descriptor, v int64) export.Aggregator { - ctx := context.Background() - gagg := lastvalue.New() - _ = gagg.Update(ctx, metric.NewInt64Number(v), desc) - gagg.Checkpoint(desc) - return gagg -} - -// Convenience method for building a test exported lastValue record. 
-func NewLastValueRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record { - return export.NewRecord(desc, labels, Resource, LastValueAgg(desc, value)) -} - -// Convenience method for building a test exported counter record. -func NewCounterRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record { - return export.NewRecord(desc, labels, Resource, CounterAgg(desc, value)) -} - -// CounterAgg returns a checkpointed counter aggregator w/ the specified descriptor and value. -func CounterAgg(desc *metric.Descriptor, v int64) export.Aggregator { - ctx := context.Background() - cagg := sum.New() - _ = cagg.Update(ctx, metric.NewInt64Number(v), desc) - cagg.Checkpoint(desc) - return cagg } // AddTo adds a name/label-encoding entry with the lastValue or counter From ea8eb40c4d275bd8db828d25fb9205a1002bff3a Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 16:36:03 -0700 Subject: [PATCH 07/21] Avoid an allocation penalty --- sdk/metric/aggregator/array/array.go | 4 +- sdk/metric/aggregator/array/array_test.go | 18 ++++---- sdk/metric/aggregator/ddsketch/ddsketch.go | 14 +++--- .../aggregator/ddsketch/ddsketch_test.go | 10 ++--- .../aggregator/histogram/benchmark_test.go | 4 +- sdk/metric/aggregator/histogram/histogram.go | 15 ++++--- .../aggregator/histogram/histogram_test.go | 17 ++++---- sdk/metric/aggregator/lastvalue/lastvalue.go | 10 +++-- .../aggregator/lastvalue/lastvalue_test.go | 14 +++--- sdk/metric/aggregator/minmaxsumcount/mmsc.go | 12 ++++-- .../aggregator/minmaxsumcount/mmsc_test.go | 14 +++--- sdk/metric/aggregator/sum/sum.go | 4 +- sdk/metric/aggregator/sum/sum_test.go | 15 +++---- sdk/metric/selector/simple/simple.go | 43 +++++++++++++------ 14 files changed, 109 insertions(+), 85 deletions(-) diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index deeaf060c0f..65b6dfce827 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go 
@@ -46,8 +46,8 @@ var _ aggregator.Points = &Aggregator{} // New returns a new array aggregator, which aggregates recorded // measurements by storing them in an array. This type uses a mutex // for Update() and Checkpoint() concurrency. -func New() *Aggregator { - return &Aggregator{} +func New(cnt int) []Aggregator { + return make([]Aggregator, cnt) } // Sum returns the sum of values in the checkpoint. diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 2e782af5c26..8bf44d9ff9d 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -55,8 +55,8 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func (ut *updateTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := New() - ckpt := New() + alloc := New(2) + agg, ckpt := &alloc[0], &alloc[1] all := test.NewNumbers(profile.NumberKind) @@ -124,10 +124,8 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg1 := New() - agg2 := New() - ckpt1 := New() - ckpt2 := New() + alloc := New(4) + agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] all := test.NewNumbers(profile.NumberKind) @@ -208,8 +206,8 @@ func TestArrayMerge(t *testing.T) { func TestArrayErrors(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - agg := New() - ckpt := New() + alloc := New(2) + agg, ckpt := &alloc[0], &alloc[1] _, err := ckpt.Max() require.Error(t, err) @@ -283,8 +281,8 @@ func TestArrayFloat64(t *testing.T) { all := test.NewNumbers(metric.Float64NumberKind) - agg := New() - ckpt := New() + alloc := New(2) + agg, ckpt := &alloc[0], &alloc[1] for _, f := range fpsf(1) { all.Append(metric.NewFloat64Number(f)) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go 
b/sdk/metric/aggregator/ddsketch/ddsketch.go index b9aaf836845..92d4312ad67 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -42,12 +42,16 @@ var _ aggregator.MinMaxSumCount = &Aggregator{} var _ aggregator.Distribution = &Aggregator{} // New returns a new DDSketch aggregator. -func New(desc *metric.Descriptor, cfg *Config) *Aggregator { - return &Aggregator{ - cfg: cfg, - kind: desc.NumberKind(), - sketch: sdk.NewDDSketch(cfg), +func New(cnt int, desc *metric.Descriptor, cfg *Config) []Aggregator { + aggs := make([]Aggregator, cnt) + for i := range aggs { + aggs[i] = Aggregator{ + cfg: cfg, + kind: desc.NumberKind(), + sketch: sdk.NewDDSketch(cfg), + } } + return aggs } // NewDefaultConfig returns a new, default DDSketch config. diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 21447dc2e95..1d18f66e60d 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -57,8 +57,8 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func (ut *updateTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := New(descriptor, NewDefaultConfig()) - ckpt := New(descriptor, NewDefaultConfig()) + alloc := New(2, descriptor, NewDefaultConfig()) + agg, ckpt := &alloc[0], &alloc[1] all := test.NewNumbers(profile.NumberKind) for i := 0; i < count; i++ { @@ -120,10 +120,8 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg1 := New(descriptor, NewDefaultConfig()) - agg2 := New(descriptor, NewDefaultConfig()) - ckpt1 := New(descriptor, NewDefaultConfig()) - ckpt2 := New(descriptor, NewDefaultConfig()) + alloc := New(4, descriptor, NewDefaultConfig()) + agg1, agg2, ckpt1, ckpt2 := 
&alloc[0], &alloc[1], &alloc[2], &alloc[3] all := test.NewNumbers(profile.NumberKind) for i := 0; i < count; i++ { diff --git a/sdk/metric/aggregator/histogram/benchmark_test.go b/sdk/metric/aggregator/histogram/benchmark_test.go index ff99cbdf651..eecfca403e5 100644 --- a/sdk/metric/aggregator/histogram/benchmark_test.go +++ b/sdk/metric/aggregator/histogram/benchmark_test.go @@ -38,7 +38,7 @@ func benchmarkHistogramSearchFloat64(b *testing.B, size int) { values[i] = rand.Float64() * inputRange } desc := test.NewAggregatorTest(metric.ValueRecorderKind, metric.Float64NumberKind) - agg := histogram.New(desc, boundaries) + agg := &histogram.New(1, desc, boundaries)[0] ctx := context.Background() b.ReportAllocs() @@ -89,7 +89,7 @@ func benchmarkHistogramSearchInt64(b *testing.B, size int) { values[i] = int64(rand.Float64() * inputRange) } desc := test.NewAggregatorTest(metric.ValueRecorderKind, metric.Int64NumberKind) - agg := histogram.New(desc, boundaries) + agg := &histogram.New(1, desc, boundaries)[0] ctx := context.Background() b.ReportAllocs() diff --git a/sdk/metric/aggregator/histogram/histogram.go b/sdk/metric/aggregator/histogram/histogram.go index 4cd9b933f3f..1609c4815a7 100644 --- a/sdk/metric/aggregator/histogram/histogram.go +++ b/sdk/metric/aggregator/histogram/histogram.go @@ -62,7 +62,9 @@ var _ aggregator.Histogram = &Aggregator{} // Note that this aggregator maintains each value using independent // atomic operations, which introduces the possibility that // checkpoints are inconsistent. -func New(desc *metric.Descriptor, boundaries []float64) *Aggregator { +func New(cnt int, desc *metric.Descriptor, boundaries []float64) []Aggregator { + aggs := make([]Aggregator, cnt) + // Boundaries MUST be ordered otherwise the histogram could not // be properly computed. 
sortedBoundaries := make([]float64, len(boundaries)) @@ -70,11 +72,14 @@ func New(desc *metric.Descriptor, boundaries []float64) *Aggregator { copy(sortedBoundaries, boundaries) sort.Float64s(sortedBoundaries) - return &Aggregator{ - kind: desc.NumberKind(), - boundaries: sortedBoundaries, - state: emptyState(sortedBoundaries), + for i := range aggs { + aggs[i] = Aggregator{ + kind: desc.NumberKind(), + boundaries: sortedBoundaries, + state: emptyState(sortedBoundaries), + } } + return aggs } // Sum returns the sum of all values in the checkpoint. diff --git a/sdk/metric/aggregator/histogram/histogram_test.go b/sdk/metric/aggregator/histogram/histogram_test.go index e8545fcb34c..bfe2d32407e 100644 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ b/sdk/metric/aggregator/histogram/histogram_test.go @@ -101,8 +101,8 @@ func TestHistogramPositiveAndNegative(t *testing.T) { func testHistogram(t *testing.T, profile test.Profile, policy policy) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := histogram.New(descriptor, boundaries) - ckpt := histogram.New(descriptor, boundaries) + alloc := histogram.New(2, descriptor, boundaries) + agg, ckpt := &alloc[0], &alloc[1] all := test.NewNumbers(profile.NumberKind) @@ -147,7 +147,7 @@ func TestHistogramInitial(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := histogram.New(descriptor, boundaries) + agg := histogram.New(1, descriptor, boundaries)[0] buckets, err := agg.Histogram() require.NoError(t, err) @@ -160,10 +160,8 @@ func TestHistogramMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg1 := histogram.New(descriptor, boundaries) - agg2 := histogram.New(descriptor, boundaries) - ckpt1 := histogram.New(descriptor, boundaries) - ckpt2 
:= histogram.New(descriptor, boundaries) + alloc := histogram.New(4, descriptor, boundaries) + agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] all := test.NewNumbers(profile.NumberKind) @@ -215,8 +213,9 @@ func TestHistogramNotSet(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := histogram.New(descriptor, boundaries) - ckpt := histogram.New(descriptor, boundaries) + alloc := histogram.New(2, descriptor, boundaries) + agg, ckpt := &alloc[0], &alloc[1] + err := agg.Checkpoint(ckpt, descriptor) require.NoError(t, err) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue.go b/sdk/metric/aggregator/lastvalue/lastvalue.go index 41775e0028f..2e89a0c27f6 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue.go @@ -57,10 +57,14 @@ var unsetLastValue = &lastValueData{} // New returns a new lastValue aggregator. This aggregator retains the // last value and timestamp that were recorded. 
-func New() *Aggregator { - return &Aggregator{ - value: unsafe.Pointer(unsetLastValue), +func New(cnt int) []Aggregator { + aggs := make([]Aggregator, cnt) + for i := range aggs { + aggs[i] = Aggregator{ + value: unsafe.Pointer(unsetLastValue), + } } + return aggs } // LastValue returns the last-recorded lastValue value and the diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go index d3c4a71a97b..6416d1e2b43 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue_test.go @@ -59,8 +59,8 @@ func checkZero(t *testing.T, agg *Aggregator) { func TestLastValueUpdate(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - agg := New() - ckpt := New() + alloc := New(2) + agg, ckpt := &alloc[0], &alloc[1] record := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind) @@ -82,10 +82,8 @@ func TestLastValueUpdate(t *testing.T) { func TestLastValueMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - agg1 := New() - agg2 := New() - ckpt1 := New() - ckpt2 := New() + alloc := New(4) + agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] descriptor := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind) @@ -120,8 +118,8 @@ func TestLastValueMerge(t *testing.T) { func TestLastValueNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueObserverKind, metric.Int64NumberKind) - g := New() - ckpt := New() + g := &New(1)[0] + ckpt := &New(1)[0] g.Checkpoint(ckpt, descriptor) checkZero(t, g) diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc.go b/sdk/metric/aggregator/minmaxsumcount/mmsc.go index edacfa2a596..c4b0c645834 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc.go @@ -48,12 +48,16 @@ var _ aggregator.MinMaxSumCount = &Aggregator{} // Max. 
// // This type uses a mutex for Update() and Checkpoint() concurrency. -func New(desc *metric.Descriptor) *Aggregator { +func New(cnt int, desc *metric.Descriptor) []Aggregator { kind := desc.NumberKind() - return &Aggregator{ - kind: kind, - state: emptyState(kind), + aggs := make([]Aggregator, cnt) + for i := range aggs { + aggs[i] = Aggregator{ + kind: kind, + state: emptyState(kind), + } } + return aggs } // Sum returns the sum of values in the checkpoint. diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go index d65dd2e595a..10ac605e8cf 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go @@ -100,8 +100,8 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := New(descriptor) - ckpt := New(descriptor) + alloc := New(2, descriptor) + agg, ckpt := &alloc[0], &alloc[1] all := test.NewNumbers(profile.NumberKind) @@ -149,10 +149,8 @@ func TestMinMaxSumCountMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg1 := New(descriptor) - agg2 := New(descriptor) - ckpt1 := New(descriptor) - ckpt2 := New(descriptor) + alloc := New(4, descriptor) + agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] all := test.NewNumbers(profile.NumberKind) @@ -210,8 +208,8 @@ func TestMaxSumCountNotSet(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := New(descriptor) - ckpt := New(descriptor) + alloc := New(2, descriptor) + agg, ckpt := &alloc[0], &alloc[1] agg.Checkpoint(ckpt, descriptor) diff --git 
a/sdk/metric/aggregator/sum/sum.go b/sdk/metric/aggregator/sum/sum.go index 091e572801a..b192ec1f01d 100644 --- a/sdk/metric/aggregator/sum/sum.go +++ b/sdk/metric/aggregator/sum/sum.go @@ -35,8 +35,8 @@ var _ aggregator.Sum = &Aggregator{} // New returns a new counter aggregator implemented by atomic // operations. This aggregator implements the aggregator.Sum // export interface. -func New() *Aggregator { - return &Aggregator{} +func New(cnt int) []Aggregator { + return make([]Aggregator, cnt) } // Sum returns the last-checkpointed sum. This will never return an diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index db00a99d40e..81f1c43c19d 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -53,8 +53,8 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func TestCounterSum(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - agg := New() - ckpt := New() + alloc := New(2) + agg, ckpt := &alloc[0], &alloc[1] descriptor := test.NewAggregatorTest(metric.CounterKind, profile.NumberKind) @@ -78,8 +78,8 @@ func TestCounterSum(t *testing.T) { func TestValueRecorderSum(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - agg := New() - ckpt := New() + alloc := New(2) + agg, ckpt := &alloc[0], &alloc[1] descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) @@ -105,11 +105,8 @@ func TestValueRecorderSum(t *testing.T) { func TestCounterMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - agg1 := New() - agg2 := New() - - ckpt1 := New() - ckpt2 := New() + alloc := New(4) + agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] descriptor := test.NewAggregatorTest(metric.CounterKind, profile.NumberKind) diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go index 1cd783e0ef1..6b9cc76e59f 100644 --- 
a/sdk/metric/selector/simple/simple.go +++ b/sdk/metric/selector/simple/simple.go @@ -79,38 +79,57 @@ func NewWithHistogramDistribution(boundaries []float64) export.AggregationSelect return selectorHistogram{boundaries: boundaries} } -func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator { +func sumAggs(aggPtrs []*export.Aggregator) { + aggs := sum.New(len(aggPtrs)) + for i := range aggPtrs { + *aggPtrs[i] = &aggs[i] + } +} + +func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) { switch descriptor.MetricKind() { case metric.ValueObserverKind, metric.ValueRecorderKind: - return minmaxsumcount.New(descriptor) + aggs := minmaxsumcount.New(len(aggPtrs), descriptor) + for i := range aggPtrs { + *aggPtrs[i] = &aggs[i] + } default: - return sum.New() + sumAggs(aggPtrs) } } -func (s selectorSketch) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator { +func (s selectorSketch) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) { switch descriptor.MetricKind() { case metric.ValueObserverKind, metric.ValueRecorderKind: - return ddsketch.New(descriptor, s.config) + aggs := ddsketch.New(len(aggPtrs), descriptor, s.config) + for i := range aggPtrs { + *aggPtrs[i] = &aggs[i] + } default: - return sum.New() + sumAggs(aggPtrs) } } -func (selectorExact) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator { +func (selectorExact) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) { switch descriptor.MetricKind() { case metric.ValueObserverKind, metric.ValueRecorderKind: - return array.New() + aggs := array.New(len(aggPtrs)) + for i := range aggPtrs { + *aggPtrs[i] = &aggs[i] + } default: - return sum.New() + sumAggs(aggPtrs) } } -func (s selectorHistogram) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator { +func (s selectorHistogram) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) { 
switch descriptor.MetricKind() { case metric.ValueObserverKind, metric.ValueRecorderKind: - return histogram.New(descriptor, s.boundaries) + aggs := histogram.New(len(aggPtrs), descriptor, s.boundaries) + for i := range aggPtrs { + *aggPtrs[i] = &aggs[i] + } default: - return sum.New() + sumAggs(aggPtrs) } } From 8645651364c0bc296e58fd3cd60506b47158be62 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 16:42:13 -0700 Subject: [PATCH 08/21] Fix selector and integrator tests --- sdk/metric/integrator/simple/simple_test.go | 18 ++++-------- sdk/metric/integrator/test/test.go | 4 +-- sdk/metric/selector/simple/simple_test.go | 31 +++++++++++++-------- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/sdk/metric/integrator/simple/simple_test.go b/sdk/metric/integrator/simple/simple_test.go index 0f09d52b527..63df98e6726 100644 --- a/sdk/metric/integrator/simple/simple_test.go +++ b/sdk/metric/integrator/simple/simple_test.go @@ -69,11 +69,9 @@ func makeLabels(labels ...kv.KeyValue) *label.Set { // LastValueAgg returns a checkpointed lastValue aggregator w/ the specified descriptor and value. func LastValueAgg(desc *metric.Descriptor, v int64) export.Aggregator { ctx := context.Background() - gagg := lastvalue.New() - ckpt := lastvalue.New() + gagg := &lastvalue.New(1)[0] _ = gagg.Update(ctx, metric.NewInt64Number(v), desc) - _ = gagg.Checkpoint(ckpt, desc) - return ckpt + return gagg } // Convenience method for building a test exported lastValue record. @@ -89,11 +87,9 @@ func NewCounterRecord(desc *metric.Descriptor, labels *label.Set, value int64) e // CounterAgg returns a checkpointed counter aggregator w/ the specified descriptor and value. 
func CounterAgg(desc *metric.Descriptor, v int64) export.Aggregator { ctx := context.Background() - cagg := sum.New() - ckpt := sum.New() + cagg := &sum.New(1)[0] _ = cagg.Update(ctx, metric.NewInt64Number(v), desc) - cagg.Checkpoint(ckpt, desc) - return ckpt + return cagg } func TestSimpleStateless(t *testing.T) { @@ -178,10 +174,8 @@ func TestSimpleStateful(t *testing.T) { "sum.b/C=D,G=H/R=V": 10, // labels1 }, records1.Map) - caggA := sum.New() - caggB := sum.New() - ckptA := sum.New() - ckptB := sum.New() + alloc := sum.New(4) + caggA, caggB, ckptA, ckptB := &alloc[0], &alloc[1], &alloc[2], &alloc[3] // Test that state was NOT reset checkpointSet = b.CheckpointSet() diff --git a/sdk/metric/integrator/test/test.go b/sdk/metric/integrator/test/test.go index a3b4db3faa8..96757cecd95 100644 --- a/sdk/metric/integrator/test/test.go +++ b/sdk/metric/integrator/test/test.go @@ -56,9 +56,9 @@ func (*testAggregationSelector) AggregatorFor(desc *metric.Descriptor, aggPtrs . for _, aggp := range aggPtrs { switch desc.MetricKind() { case metric.CounterKind: - *aggp = sum.New() + *aggp = &sum.New(1)[0] case metric.ValueObserverKind: - *aggp = lastvalue.New() + *aggp = &lastvalue.New(1)[0] default: panic("Invalid descriptor MetricKind for this test") } diff --git a/sdk/metric/selector/simple/simple_test.go b/sdk/metric/selector/simple/simple_test.go index 35b2526b017..a2dc037bbfc 100644 --- a/sdk/metric/selector/simple/simple_test.go +++ b/sdk/metric/selector/simple/simple_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/api/metric" + export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch" "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" @@ -34,30 +35,36 @@ var ( testValueObserverDesc = metric.NewDescriptor("valueobserver", metric.ValueObserverKind, metric.Int64NumberKind) ) +func oneAgg(sel 
export.AggregationSelector, desc *metric.Descriptor) export.Aggregator { + var agg export.Aggregator + sel.AggregatorFor(desc, &agg) + return agg +} + func TestInexpensiveDistribution(t *testing.T) { inex := simple.NewWithInexpensiveDistribution() - require.NotPanics(t, func() { _ = inex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) - require.NotPanics(t, func() { _ = inex.AggregatorFor(&testValueRecorderDesc).(*minmaxsumcount.Aggregator) }) - require.NotPanics(t, func() { _ = inex.AggregatorFor(&testValueObserverDesc).(*minmaxsumcount.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(inex, &testCounterDesc).(*sum.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(inex, &testValueRecorderDesc).(*minmaxsumcount.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(inex, &testValueObserverDesc).(*minmaxsumcount.Aggregator) }) } func TestSketchDistribution(t *testing.T) { sk := simple.NewWithSketchDistribution(ddsketch.NewDefaultConfig()) - require.NotPanics(t, func() { _ = sk.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) - require.NotPanics(t, func() { _ = sk.AggregatorFor(&testValueRecorderDesc).(*ddsketch.Aggregator) }) - require.NotPanics(t, func() { _ = sk.AggregatorFor(&testValueObserverDesc).(*ddsketch.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(sk, &testCounterDesc).(*sum.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(sk, &testValueRecorderDesc).(*ddsketch.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(sk, &testValueObserverDesc).(*ddsketch.Aggregator) }) } func TestExactDistribution(t *testing.T) { ex := simple.NewWithExactDistribution() - require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) - require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueRecorderDesc).(*array.Aggregator) }) - require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueObserverDesc).(*array.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(ex, 
&testCounterDesc).(*sum.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(ex, &testValueRecorderDesc).(*array.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(ex, &testValueObserverDesc).(*array.Aggregator) }) } func TestHistogramDistribution(t *testing.T) { ex := simple.NewWithHistogramDistribution(nil) - require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) - require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueRecorderDesc).(*histogram.Aggregator) }) - require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueObserverDesc).(*histogram.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(ex, &testCounterDesc).(*sum.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(ex, &testValueRecorderDesc).(*histogram.Aggregator) }) + require.NotPanics(t, func() { _ = oneAgg(ex, &testValueObserverDesc).(*histogram.Aggregator) }) } From 4f04527224c1d2d76b26651a0662ca145b9f8810 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 17:03:01 -0700 Subject: [PATCH 09/21] Checkpoint --- exporters/metric/test/test.go | 9 +++-- sdk/metric/sdk.go | 68 ++++++++++++++++++++++------------- sdk/metric/stress_test.go | 4 +-- 3 files changed, 50 insertions(+), 31 deletions(-) diff --git a/exporters/metric/test/test.go b/exporters/metric/test/test.go index 396c8284c12..a83e90176a5 100644 --- a/exporters/metric/test/test.go +++ b/exporters/metric/test/test.go @@ -86,26 +86,25 @@ func createNumber(desc *metric.Descriptor, v float64) metric.Number { } func (p *CheckpointSet) AddLastValue(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, lastvalue.New(), v, labels...) + p.updateAggregator(desc, &lastvalue.New(1)[0], v, labels...) } func (p *CheckpointSet) AddCounter(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, sum.New(), v, labels...) + p.updateAggregator(desc, &sum.New(1)[0], v, labels...) 
} func (p *CheckpointSet) AddValueRecorder(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, array.New(), v, labels...) + p.updateAggregator(desc, &array.New(1)[0], v, labels...) } func (p *CheckpointSet) AddHistogramValueRecorder(desc *metric.Descriptor, boundaries []float64, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, histogram.New(desc, boundaries), v, labels...) + p.updateAggregator(desc, &histogram.New(1, desc, boundaries)[0], v, labels...) } func (p *CheckpointSet) updateAggregator(desc *metric.Descriptor, newAgg export.Aggregator, v float64, labels ...kv.KeyValue) { ctx := context.Background() // Updates and checkpoint the new aggregator _ = newAgg.Update(ctx, createNumber(desc, v), desc) - newAgg.Checkpoint(desc) // Try to add this aggregator to the CheckpointSet agg, added := p.Add(desc, newAgg, labels...) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 8d4acc78a68..1daf4d13de1 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -116,10 +116,11 @@ type ( // inst is a pointer to the corresponding instrument. inst *syncInstrument - // recorder implements the actual RecordOne() API, + // current implements the actual RecordOne() API, // depending on the type of aggregation. If nil, the // metric was disabled by the exporter. 
- recorder export.Aggregator + current export.Aggregator + checkpoint export.Aggregator } instrument struct { @@ -137,7 +138,7 @@ type ( labeledRecorder struct { observedEpoch int64 labels *label.Set - recorder export.Aggregator + observed export.Aggregator } ) @@ -185,14 +186,15 @@ func (a *asyncInstrument) getRecorder(labels *label.Set) export.Aggregator { if lrec.observedEpoch == a.meter.currentEpoch { // last value wins for Observers, so if we see the same labels // in the current epoch, we replace the old recorder - lrec.recorder = a.meter.integrator.AggregatorFor(&a.descriptor) + a.meter.integrator.AggregatorFor(&a.descriptor, &lrec.observed) } else { lrec.observedEpoch = a.meter.currentEpoch } a.recorders[labels.Equivalent()] = lrec - return lrec.recorder + return lrec.observed } - rec := a.meter.integrator.AggregatorFor(&a.descriptor) + var rec export.Aggregator + a.meter.integrator.AggregatorFor(&a.descriptor, &rec) if a.recorders == nil { a.recorders = make(map[label.Distinct]*labeledRecorder) } @@ -200,7 +202,7 @@ func (a *asyncInstrument) getRecorder(labels *label.Set) export.Aggregator { // asyncInstrument for the labelset for good. This is intentional, // but will be revisited later. 
a.recorders[labels.Equivalent()] = &labeledRecorder{ - recorder: rec, + observed: rec, labels: labels, observedEpoch: a.meter.currentEpoch, } @@ -252,7 +254,8 @@ func (s *syncInstrument) acquireHandle(kvs []kv.KeyValue, labelPtr *label.Set) * } rec.refMapped = refcountMapped{value: 2} rec.inst = s - rec.recorder = s.meter.integrator.AggregatorFor(&s.descriptor) + + s.meter.integrator.AggregatorFor(&s.descriptor, &rec.current, &rec.checkpoint) for { // Load/Store: there's a memory allocation to place `mk` into @@ -432,7 +435,17 @@ func (m *Accumulator) observeAsyncInstruments(ctx context.Context) int { } func (m *Accumulator) checkpointRecord(r *record) int { - return m.checkpoint(&r.inst.descriptor, r.recorder, r.labels) + if r.current == nil { + return 0 + } + r.current.Checkpoint(r.checkpoint, &r.inst.descriptor) + + exportRecord := export.NewRecord(&r.inst.descriptor, r.labels, m.resource, r.checkpoint) + err := m.integrator.Process(exportRecord) + if err != nil { + global.Handle(err) + } + return 1 } func (m *Accumulator) checkpointAsync(a *asyncInstrument) int { @@ -444,7 +457,14 @@ func (m *Accumulator) checkpointAsync(a *asyncInstrument) int { lrec := lrec epochDiff := m.currentEpoch - lrec.observedEpoch if epochDiff == 0 { - checkpointed += m.checkpoint(&a.descriptor, lrec.recorder, lrec.labels) + if lrec.observed != nil { + exportRecord := export.NewRecord(&a.descriptor, lrec.labels, m.resource, lrec.observed) + err := m.integrator.Process(exportRecord) + if err != nil { + global.Handle(err) + } + checkpointed += 1 + } } else if epochDiff > 1 { // This is second collection cycle with no // observations for this labelset. 
Remove the @@ -458,19 +478,19 @@ func (m *Accumulator) checkpointAsync(a *asyncInstrument) int { return checkpointed } -func (m *Accumulator) checkpoint(descriptor *metric.Descriptor, recorder export.Aggregator, labels *label.Set) int { - if recorder == nil { - return 0 - } - recorder.Checkpoint(descriptor) +// func (m *Accumulator) checkpoint(descriptor *metric.Descriptor, recorder export.Aggregator, labels *label.Set) int { +// if recorder == nil { +// return 0 +// } +// recorder.Checkpoint(descriptor) - exportRecord := export.NewRecord(descriptor, labels, m.resource, recorder) - err := m.integrator.Process(exportRecord) - if err != nil { - global.Handle(err) - } - return 1 -} +// exportRecord := export.NewRecord(descriptor, labels, m.resource, recorder) +// err := m.integrator.Process(exportRecord) +// if err != nil { +// global.Handle(err) +// } +// return 1 +// } // RecordBatch enters a batch of metric events. func (m *Accumulator) RecordBatch(ctx context.Context, kvs []kv.KeyValue, measurements ...api.Measurement) { @@ -498,7 +518,7 @@ func (m *Accumulator) RecordBatch(ctx context.Context, kvs []kv.KeyValue, measur // RecordOne implements api.SyncImpl. func (r *record) RecordOne(ctx context.Context, number api.Number) { - if r.recorder == nil { + if r.current == nil { // The instrument is disabled according to the AggregationSelector. 
return } @@ -506,7 +526,7 @@ func (r *record) RecordOne(ctx context.Context, number api.Number) { global.Handle(err) return } - if err := r.recorder.Update(ctx, number, &r.inst.descriptor); err != nil { + if err := r.current.Update(ctx, number, &r.inst.descriptor); err != nil { global.Handle(err) return } diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 92094de340a..3e8653026ca 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -248,9 +248,9 @@ func (*testFixture) AggregatorFor(descriptor *metric.Descriptor) export.Aggregat name := descriptor.Name() switch { case strings.HasSuffix(name, "counter"): - return sum.New() + return &sum.New(1)[0] case strings.HasSuffix(name, "lastvalue"): - return lastvalue.New() + return &lastvalue.New(1)[0] default: panic("Not implemented for this test") } From 706d5183d0defd8327c293b3cf94d65ee195325f Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 18:14:52 -0700 Subject: [PATCH 10/21] Checkpoint --- .../aggregator/ddsketch/ddsketch_test.go | 8 +- sdk/metric/controller/push/push_test.go | 2 +- sdk/metric/correct_test.go | 119 ++++++++++-------- 3 files changed, 69 insertions(+), 60 deletions(-) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 1d18f66e60d..48a52d1979c 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/api/metric" - "go.opentelemetry.io/otel/sdk/export/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) @@ -43,15 +43,15 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { require.Equal(t, int64(0), count) max, err := agg.Max() - require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.True(t, errors.Is(err, aggregation.ErrNoData)) 
require.Equal(t, kind.Zero(), max) median, err := agg.Quantile(0.5) - require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.True(t, errors.Is(err, aggregation.ErrNoData)) require.Equal(t, kind.Zero(), median) min, err := agg.Min() - require.True(t, errors.Is(err, aggregator.ErrNoData)) + require.True(t, errors.Is(err, aggregation.ErrNoData)) require.Equal(t, kind.Zero(), min) } diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index 18d66b224f0..38cad3235a4 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -33,9 +33,9 @@ import ( "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/controller/push" controllerTest "go.opentelemetry.io/otel/sdk/metric/controller/test" + "go.opentelemetry.io/otel/sdk/metric/integrator/test" integratorTest "go.opentelemetry.io/otel/sdk/metric/integrator/test" "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/sdk/metric/integrator/test" ) var testResource = resource.New(kv.String("R", "V")) diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 23392d160ce..f1bbd049317 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -71,19 +71,27 @@ func init() { } type correctnessIntegrator struct { - newAggCount int64 - t *testing.T - export.AggregationSelector + *testSelector records []export.Record } +type testSelector struct { + selector export.AggregationSelector + newAggCount int +} + +func (ts *testSelector) AggregatorFor(desc *metric.Descriptor, aggPtrs ...*export.Aggregator) { + ts.newAggCount += len(aggPtrs) + test.AggregationSelector().AggregatorFor(desc, aggPtrs...) 
+} + func newSDK(t *testing.T) (metric.Meter, *metricsdk.Accumulator, *correctnessIntegrator) { testHandler.Reset() integrator := &correctnessIntegrator{ - t: t, - AggregationSelector: test.AggregationSelector(), + t: t, + testSelector: &testSelector{selector: test.AggregationSelector()}, } accum := metricsdk.NewAccumulator( integrator, @@ -132,7 +140,7 @@ func TestInputRangeUpDownCounter(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - counter := Must(meter).NewInt64UpDownCounter("name.updowncounter") + counter := Must(meter).NewInt64UpDownCounter("name.sum") counter.Add(ctx, -1) counter.Add(ctx, -1) @@ -189,7 +197,7 @@ func TestRecordNaN(t *testing.T) { ctx := context.Background() meter, _, _ := newSDK(t) - c := Must(meter).NewFloat64Counter("sum.name") + c := Must(meter).NewFloat64Counter("name.sum") require.Nil(t, testHandler.Flush()) c.Add(ctx, math.NaN()) @@ -200,7 +208,7 @@ func TestSDKLabelsDeduplication(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - counter := Must(meter).NewInt64Counter("counter") + counter := Must(meter).NewInt64Counter("name.sum") const ( maxKeys = 21 @@ -299,13 +307,13 @@ func TestObserverCollection(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - _ = Must(meter).NewFloat64ValueObserver("float.valueobserver", func(_ context.Context, result metric.Float64ObserverResult) { + _ = Must(meter).NewFloat64ValueObserver("float.valueobserver.lastvalue", func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(1, kv.String("A", "B")) // last value wins result.Observe(-1, kv.String("A", "B")) result.Observe(-1, kv.String("C", "D")) }) - _ = Must(meter).NewInt64ValueObserver("int.valueobserver", func(_ context.Context, result metric.Int64ObserverResult) { + _ = Must(meter).NewInt64ValueObserver("int.valueobserver.lastvalue", func(_ context.Context, result metric.Int64ObserverResult) { result.Observe(-1, kv.String("A", "B")) 
result.Observe(1) // last value wins @@ -313,12 +321,12 @@ func TestObserverCollection(t *testing.T) { result.Observe(1) }) - _ = Must(meter).NewFloat64SumObserver("float.sumobserver", func(_ context.Context, result metric.Float64ObserverResult) { + _ = Must(meter).NewFloat64SumObserver("float.sumobserver.sum", func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(1, kv.String("A", "B")) result.Observe(2, kv.String("A", "B")) result.Observe(1, kv.String("C", "D")) }) - _ = Must(meter).NewInt64SumObserver("int.sumobserver", func(_ context.Context, result metric.Int64ObserverResult) { + _ = Must(meter).NewInt64SumObserver("int.sumobserver.sum", func(_ context.Context, result metric.Int64ObserverResult) { result.Observe(2, kv.String("A", "B")) result.Observe(1) // last value wins @@ -326,12 +334,12 @@ func TestObserverCollection(t *testing.T) { result.Observe(1) }) - _ = Must(meter).NewFloat64UpDownSumObserver("float.updownsumobserver", func(_ context.Context, result metric.Float64ObserverResult) { + _ = Must(meter).NewFloat64UpDownSumObserver("float.updownsumobserver.sum", func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(1, kv.String("A", "B")) result.Observe(-2, kv.String("A", "B")) result.Observe(1, kv.String("C", "D")) }) - _ = Must(meter).NewInt64UpDownSumObserver("int.updownsumobserver", func(_ context.Context, result metric.Int64ObserverResult) { + _ = Must(meter).NewInt64UpDownSumObserver("int.updownsumobserver.sum", func(_ context.Context, result metric.Int64ObserverResult) { result.Observe(2, kv.String("A", "B")) result.Observe(1) // last value wins @@ -339,7 +347,7 @@ func TestObserverCollection(t *testing.T) { result.Observe(-1) }) - _ = Must(meter).NewInt64ValueObserver("empty.valueobserver", func(_ context.Context, result metric.Int64ObserverResult) { + _ = Must(meter).NewInt64ValueObserver("empty.valueobserver.sum", func(_ context.Context, result metric.Int64ObserverResult) { }) collected := 
sdk.Collect(ctx) @@ -351,20 +359,20 @@ func TestObserverCollection(t *testing.T) { _ = out.AddTo(rec) } require.EqualValues(t, map[string]float64{ - "float.valueobserver/A=B/R=V": -1, - "float.valueobserver/C=D/R=V": -1, - "int.valueobserver//R=V": 1, - "int.valueobserver/A=B/R=V": 1, - - "float.sumobserver/A=B/R=V": 2, - "float.sumobserver/C=D/R=V": 1, - "int.sumobserver//R=V": 1, - "int.sumobserver/A=B/R=V": 1, - - "float.updownsumobserver/A=B/R=V": -2, - "float.updownsumobserver/C=D/R=V": 1, - "int.updownsumobserver//R=V": -1, - "int.updownsumobserver/A=B/R=V": 1, + "float.valueobserver.lastvalue/A=B/R=V": -1, + "float.valueobserver.lastvalue/C=D/R=V": -1, + "int.valueobserver.lastvalue//R=V": 1, + "int.valueobserver.lastvalue/A=B/R=V": 1, + + "float.sumobserver.sum/A=B/R=V": 2, + "float.sumobserver.sum/C=D/R=V": 1, + "int.sumobserver.sum//R=V": 1, + "int.sumobserver.sum/A=B/R=V": 1, + + "float.updownsumobserver.sum/A=B/R=V": -2, + "float.updownsumobserver.sum/C=D/R=V": 1, + "int.updownsumobserver.sum//R=V": -1, + "int.updownsumobserver.sum/A=B/R=V": 1, }, out.Map) } @@ -372,13 +380,14 @@ func TestSumObserverInputRange(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - _ = Must(meter).NewFloat64SumObserver("float.sumobserver", func(_ context.Context, result metric.Float64ObserverResult) { + // TODO: these tests are testing for negative values, not for _descending values_. Fix. 
+ _ = Must(meter).NewFloat64SumObserver("float.sumobserver.sum", func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(-2, kv.String("A", "B")) require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) result.Observe(-1, kv.String("C", "D")) require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) }) - _ = Must(meter).NewInt64SumObserver("int.sumobserver", func(_ context.Context, result metric.Int64ObserverResult) { + _ = Must(meter).NewInt64SumObserver("int.sumobserver.sum", func(_ context.Context, result metric.Int64ObserverResult) { result.Observe(-1, kv.String("A", "B")) require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) result.Observe(-1) @@ -437,12 +446,12 @@ func TestObserverBatch(t *testing.T) { intUpDownSumObs.Observation(10), ) }) - floatValueObs = batch.NewFloat64ValueObserver("float.valueobserver") - intValueObs = batch.NewInt64ValueObserver("int.valueobserver") - floatSumObs = batch.NewFloat64SumObserver("float.sumobserver") - intSumObs = batch.NewInt64SumObserver("int.sumobserver") - floatUpDownSumObs = batch.NewFloat64UpDownSumObserver("float.updownsumobserver") - intUpDownSumObs = batch.NewInt64UpDownSumObserver("int.updownsumobserver") + floatValueObs = batch.NewFloat64ValueObserver("float.valueobserver.lastvalue") + intValueObs = batch.NewInt64ValueObserver("int.valueobserver.lastvalue") + floatSumObs = batch.NewFloat64SumObserver("float.sumobserver.sum") + intSumObs = batch.NewInt64SumObserver("int.sumobserver.sum") + floatUpDownSumObs = batch.NewFloat64UpDownSumObserver("float.updownsumobserver.sum") + intUpDownSumObs = batch.NewInt64UpDownSumObserver("int.updownsumobserver.sum") collected := sdk.Collect(ctx) @@ -453,20 +462,20 @@ func TestObserverBatch(t *testing.T) { _ = out.AddTo(rec) } require.EqualValues(t, map[string]float64{ - "float.sumobserver//R=V": 1.1, - "float.sumobserver/A=B/R=V": 1000, - "int.sumobserver//R=V": 10, - "int.sumobserver/A=B/R=V": 100, - - 
"int.updownsumobserver/A=B/R=V": -100, - "float.updownsumobserver/A=B/R=V": -1000, - "int.updownsumobserver//R=V": 10, - "float.updownsumobserver/C=D/R=V": -1, - - "float.valueobserver/A=B/R=V": -1, - "float.valueobserver/C=D/R=V": -1, - "int.valueobserver//R=V": 1, - "int.valueobserver/A=B/R=V": 1, + "float.sumobserver.sum//R=V": 1.1, + "float.sumobserver.sum/A=B/R=V": 1000, + "int.sumobserver.sum//R=V": 10, + "int.sumobserver.sum/A=B/R=V": 100, + + "int.updownsumobserver.sum/A=B/R=V": -100, + "float.updownsumobserver.sum/A=B/R=V": -1000, + "int.updownsumobserver.sum//R=V": 10, + "float.updownsumobserver.sum/C=D/R=V": -1, + + "float.valueobserver.lastvalue/A=B/R=V": -1, + "float.valueobserver.lastvalue/C=D/R=V": -1, + "int.valueobserver.lastvalue//R=V": 1, + "int.valueobserver.lastvalue/A=B/R=V": 1, }, out.Map) } @@ -512,7 +521,7 @@ func TestRecordPersistence(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - c := Must(meter).NewFloat64Counter("sum.name") + c := Must(meter).NewFloat64Counter("name.sum") b := c.Bind(kv.String("bound", "true")) uk := kv.String("bound", "false") @@ -522,7 +531,7 @@ func TestRecordPersistence(t *testing.T) { sdk.Collect(ctx) } - require.Equal(t, int64(2), integrator.newAggCount) + require.Equal(t, 4, integrator.newAggCount) } func TestIncorrectInstruments(t *testing.T) { @@ -546,7 +555,7 @@ func TestIncorrectInstruments(t *testing.T) { // Now try with instruments from another SDK. 
var noopMeter metric.Meter - counter = metric.Must(noopMeter).NewInt64Counter("counter") + counter = metric.Must(noopMeter).NewInt64Counter("name.sum") observer = metric.Must(noopMeter).NewBatchObserver( func(context.Context, metric.BatchObserverResult) {}, ).NewInt64ValueObserver("observer") @@ -565,7 +574,7 @@ func TestSyncInAsync(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - counter := Must(meter).NewFloat64Counter("counter") + counter := Must(meter).NewFloat64Counter("name.sum") _ = Must(meter).NewInt64ValueObserver("observer", func(ctx context.Context, result metric.Int64ObserverResult) { result.Observe(10) From 2d008bea378a770ac9b1bcb6196bed93f94776a1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 21:16:41 -0700 Subject: [PATCH 11/21] Repair the sdk/metric tests --- sdk/metric/controller/push/push_test.go | 16 ++++++++-------- sdk/metric/correct_test.go | 8 ++++---- sdk/metric/histogram_stress_test.go | 12 ++++++------ sdk/metric/minmaxsumcount_stress_test.go | 11 +++++------ sdk/metric/stress_test.go | 4 ++-- 5 files changed, 25 insertions(+), 26 deletions(-) diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index 38cad3235a4..a0d177a9080 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -151,7 +151,7 @@ func TestPushTicker(t *testing.T) { ctx := context.Background() - counter := metric.Must(meter).NewInt64Counter("counter") + counter := metric.Must(meter).NewInt64Counter("counter.sum") p.Start() @@ -167,7 +167,7 @@ func TestPushTicker(t *testing.T) { records, exports = fix.exporter.resetRecords() require.Equal(t, 1, exports) require.Equal(t, 1, len(records)) - require.Equal(t, "counter", records[0].Descriptor().Name()) + require.Equal(t, "counter.sum", records[0].Descriptor().Name()) require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder())) sum, err := 
records[0].Aggregator().(aggregation.Sum).Sum() @@ -184,7 +184,7 @@ func TestPushTicker(t *testing.T) { records, exports = fix.exporter.resetRecords() require.Equal(t, 2, exports) require.Equal(t, 1, len(records)) - require.Equal(t, "counter", records[0].Descriptor().Name()) + require.Equal(t, "counter.sum", records[0].Descriptor().Name()) require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder())) sum, err = records[0].Aggregator().(aggregation.Sum).Sum() @@ -210,14 +210,14 @@ func TestPushExportError(t *testing.T) { expectedDescriptors []string expectedError error }{ - {"errNone", nil, []string{"counter1{R=V,X=Y}", "counter2{R=V,}"}, nil}, - {"errNoData", aggregation.ErrNoData, []string{"counter2{R=V,}"}, nil}, + {"errNone", nil, []string{"counter1.sum{R=V,X=Y}", "counter2.sum{R=V,}"}, nil}, + {"errNoData", aggregation.ErrNoData, []string{"counter2.sum{R=V,}"}, nil}, {"errUnexpected", errAggregator, []string{}, errAggregator}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { fix := newFixture(t) - fix.exporter.injectErr = injector("counter1", tt.injectedError) + fix.exporter.injectErr = injector("counter1.sum", tt.injectedError) p := push.New( test.AggregationSelector(), @@ -232,8 +232,8 @@ func TestPushExportError(t *testing.T) { ctx := context.Background() meter := p.Provider().Meter("name") - counter1 := metric.Must(meter).NewInt64Counter("counter1") - counter2 := metric.Must(meter).NewInt64Counter("counter2") + counter1 := metric.Must(meter).NewInt64Counter("counter1.sum") + counter2 := metric.Must(meter).NewInt64Counter("counter2.sum") p.Start() runtime.Gosched() diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index f1bbd049317..cd8b0143ec2 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -574,8 +574,8 @@ func TestSyncInAsync(t *testing.T) { ctx := context.Background() meter, sdk, integrator := newSDK(t) - counter := Must(meter).NewFloat64Counter("name.sum") - _ = 
Must(meter).NewInt64ValueObserver("observer", + counter := Must(meter).NewFloat64Counter("counter.sum") + _ = Must(meter).NewInt64ValueObserver("observer.lastvalue", func(ctx context.Context, result metric.Int64ObserverResult) { result.Observe(10) counter.Add(ctx, 100) @@ -589,7 +589,7 @@ func TestSyncInAsync(t *testing.T) { _ = out.AddTo(rec) } require.EqualValues(t, map[string]float64{ - "counter//R=V": 100, - "observer//R=V": 10, + "counter.sum//R=V": 100, + "observer.lastvalue//R=V": 10, }, out.Map) } diff --git a/sdk/metric/histogram_stress_test.go b/sdk/metric/histogram_stress_test.go index 0ed2a59b08c..0526a8b3d82 100644 --- a/sdk/metric/histogram_stress_test.go +++ b/sdk/metric/histogram_stress_test.go @@ -25,10 +25,10 @@ import ( ) func TestStressInt64Histogram(t *testing.T) { - desc := metric.NewDescriptor("some.histogram", metric.ValueRecorderKind, metric.Int64NumberKind) + desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Int64NumberKind) alloc := histogram.New(2, &desc, []float64{25, 50, 75}) - histo, ckpt := &alloc[0], &alloc[1] + h, ckpt := &alloc[0], &alloc[1] ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() @@ -39,17 +39,17 @@ func TestStressInt64Histogram(t *testing.T) { case <-ctx.Done(): return default: - _ = histo.Update(ctx, metric.NewInt64Number(rnd.Int63()%100), &desc) + _ = h.Update(ctx, metric.NewInt64Number(rnd.Int63()%100), &desc) } } }() startTime := time.Now() for time.Since(startTime) < time.Second { - histo.Checkpoint(ckpt, &desc) + h.Checkpoint(ckpt, &desc) - b, _ := histo.Histogram() - c, _ := histo.Count() + b, _ := ckpt.Histogram() + c, _ := ckpt.Count() var realCount int64 for _, c := range b.Counts { diff --git a/sdk/metric/minmaxsumcount_stress_test.go b/sdk/metric/minmaxsumcount_stress_test.go index c098e599503..c2b175048ca 100644 --- a/sdk/metric/minmaxsumcount_stress_test.go +++ b/sdk/metric/minmaxsumcount_stress_test.go @@ -25,8 +25,7 @@ import ( ) func 
TestStressInt64MinMaxSumCount(t *testing.T) { - desc := metric.NewDescriptor("some.minmaxsumcount", metric.ValueRecorderKind, metric.Int64NumberKind) - + desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Int64NumberKind) alloc := minmaxsumcount.New(2, &desc) mmsc, ckpt := &alloc[0], &alloc[1] @@ -50,10 +49,10 @@ func TestStressInt64MinMaxSumCount(t *testing.T) { for time.Since(startTime) < time.Second { mmsc.Checkpoint(ckpt, &desc) - s, _ := mmsc.Sum() - c, _ := mmsc.Count() - min, e1 := mmsc.Min() - max, e2 := mmsc.Max() + s, _ := ckpt.Sum() + c, _ := ckpt.Count() + min, e1 := ckpt.Min() + max, e2 := ckpt.Max() if c == 0 && (e1 == nil || e2 == nil || s.AsInt64() != 0) { t.Fail() } diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 6a3dba1ceb7..5eb2d558d65 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -343,7 +343,7 @@ func float64sEqual(a, b api.Number) bool { func intCounterTestImpl() testImpl { return testImpl{ newInstrument: func(meter api.Meter, name string) SyncImpler { - return Must(meter).NewInt64Counter(name + ".counter") + return Must(meter).NewInt64Counter(name + ".sum") }, getUpdateValue: func() api.Number { for { @@ -381,7 +381,7 @@ func TestStressInt64Counter(t *testing.T) { func floatCounterTestImpl() testImpl { return testImpl{ newInstrument: func(meter api.Meter, name string) SyncImpler { - return Must(meter).NewFloat64Counter(name + ".counter") + return Must(meter).NewFloat64Counter(name + ".sum") }, getUpdateValue: func() api.Number { for { From 75670939d225f375683b9bff9c8b84affc72a3ff Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 22:41:14 -0700 Subject: [PATCH 12/21] Tests & lint passing --- api/global/internal/benchmark_test.go | 2 +- exporters/metric/stdout/stdout_test.go | 69 +++++++++++-------- exporters/metric/test/test.go | 15 ++++ exporters/otlp/go.sum | 1 + .../otlp/internal/transform/metric_test.go | 41 ++++++----- 
exporters/otlp/otlp_metric_test.go | 11 +-- sdk/metric/aggregator/array/array_test.go | 32 +++++---- .../aggregator/ddsketch/ddsketch_test.go | 16 +++-- sdk/metric/aggregator/histogram/histogram.go | 22 +++--- .../aggregator/histogram/histogram_test.go | 27 +++++--- .../aggregator/lastvalue/lastvalue_test.go | 21 ++++-- .../aggregator/minmaxsumcount/mmsc_test.go | 24 ++++--- sdk/metric/aggregator/sum/sum_test.go | 25 ++++--- sdk/metric/histogram_stress_test.go | 2 +- sdk/metric/minmaxsumcount_stress_test.go | 2 +- sdk/metric/sdk.go | 10 ++- 16 files changed, 197 insertions(+), 123 deletions(-) diff --git a/api/global/internal/benchmark_test.go b/api/global/internal/benchmark_test.go index fb2bb91dc8b..32fffcbf8e0 100644 --- a/api/global/internal/benchmark_test.go +++ b/api/global/internal/benchmark_test.go @@ -25,8 +25,8 @@ import ( "go.opentelemetry.io/otel/api/trace" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/integrator/test" sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/sdk/metric/integrator/test" ) var Must = metric.Must diff --git a/exporters/metric/stdout/stdout_test.go b/exporters/metric/stdout/stdout_test.go index e08af2d7e4f..7dc241abbc9 100644 --- a/exporters/metric/stdout/stdout_test.go +++ b/exporters/metric/stdout/stdout_test.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "strings" "testing" "time" @@ -99,11 +100,13 @@ func TestStdoutTimestamp(t *testing.T) { ctx := context.Background() desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Int64NumberKind) - lvagg := lastvalue.New() + + lvagg, ckpt := test.Unslice2(lastvalue.New(2)) + aggtest.CheckedUpdate(t, lvagg, metric.NewInt64Number(321), &desc) - lvagg.Checkpoint(&desc) + _ = lvagg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, lvagg) + checkpointSet.Add(&desc, ckpt) if err := exporter.Export(ctx, checkpointSet); err != nil { 
t.Fatal("Unexpected export error: ", err) @@ -144,11 +147,13 @@ func TestStdoutCounterFormat(t *testing.T) { checkpointSet := test.NewCheckpointSet(testResource) desc := metric.NewDescriptor("test.name", metric.CounterKind, metric.Int64NumberKind) - cagg := sum.New() + + cagg, ckpt := test.Unslice2(sum.New(2)) + aggtest.CheckedUpdate(fix.t, cagg, metric.NewInt64Number(123), &desc) - cagg.Checkpoint(&desc) + _ = cagg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, cagg, kv.String("A", "B"), kv.String("C", "D")) + checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) fix.Export(checkpointSet) @@ -161,11 +166,12 @@ func TestStdoutLastValueFormat(t *testing.T) { checkpointSet := test.NewCheckpointSet(testResource) desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind) - lvagg := lastvalue.New() + lvagg, ckpt := test.Unslice2(lastvalue.New(2)) + aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc) - lvagg.Checkpoint(&desc) + _ = lvagg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, lvagg, kv.String("A", "B"), kv.String("C", "D")) + checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) fix.Export(checkpointSet) @@ -178,12 +184,14 @@ func TestStdoutMinMaxSumCount(t *testing.T) { checkpointSet := test.NewCheckpointSet(testResource) desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind) - magg := minmaxsumcount.New(&desc) + + magg, ckpt := test.Unslice2(minmaxsumcount.New(2, &desc)) + aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(123.456), &desc) aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(876.543), &desc) - magg.Checkpoint(&desc) + _ = magg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, magg, kv.String("A", "B"), kv.String("C", "D")) + checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) fix.Export(checkpointSet) @@ -198,15 +206,15 @@ func TestStdoutValueRecorderFormat(t 
*testing.T) { checkpointSet := test.NewCheckpointSet(testResource) desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind) - magg := array.New() + aagg, ckpt := test.Unslice2(array.New(2)) for i := 0; i < 1000; i++ { - aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(float64(i)+0.5), &desc) + aggtest.CheckedUpdate(fix.t, aagg, metric.NewFloat64Number(float64(i)+0.5), &desc) } - magg.Checkpoint(&desc) + _ = aagg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, magg, kv.String("A", "B"), kv.String("C", "D")) + checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) fix.Export(checkpointSet) @@ -239,28 +247,27 @@ func TestStdoutValueRecorderFormat(t *testing.T) { func TestStdoutNoData(t *testing.T) { desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind) - for name, tc := range map[string]export.Aggregator{ - "ddsketch": ddsketch.New(&desc, ddsketch.NewDefaultConfig()), - "minmaxsumcount": minmaxsumcount.New(&desc), - } { - tc := tc - t.Run(name, func(t *testing.T) { + + runTwoAggs := func(agg, ckpt export.Aggregator) { + t.Run(fmt.Sprintf("%T", agg), func(t *testing.T) { t.Parallel() fix := newFixture(t, stdout.Config{}) checkpointSet := test.NewCheckpointSet(testResource) - magg := tc - magg.Checkpoint(&desc) + _ = agg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, magg) + checkpointSet.Add(&desc, ckpt) fix.Export(checkpointSet) require.Equal(t, `{"updates":null}`, fix.Output()) }) } + + runTwoAggs(test.Unslice2(ddsketch.New(2, &desc, ddsketch.NewDefaultConfig()))) + runTwoAggs(test.Unslice2(minmaxsumcount.New(2, &desc))) } func TestStdoutLastValueNotSet(t *testing.T) { @@ -269,8 +276,9 @@ func TestStdoutLastValueNotSet(t *testing.T) { checkpointSet := test.NewCheckpointSet(testResource) desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind) - lvagg := lastvalue.New() - lvagg.Checkpoint(&desc) + + lvagg, ckpt := 
test.Unslice2(lastvalue.New(2)) + _ = lvagg.Checkpoint(ckpt, &desc) checkpointSet.Add(&desc, lvagg, kv.String("A", "B"), kv.String("C", "D")) @@ -319,11 +327,12 @@ func TestStdoutResource(t *testing.T) { checkpointSet := test.NewCheckpointSet(tc.res) desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind) - lvagg := lastvalue.New() + lvagg, ckpt := test.Unslice2(lastvalue.New(2)) + aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc) - lvagg.Checkpoint(&desc) + _ = lvagg.Checkpoint(ckpt, &desc) - checkpointSet.Add(&desc, lvagg, tc.attrs...) + checkpointSet.Add(&desc, ckpt, tc.attrs...) fix.Export(checkpointSet) diff --git a/exporters/metric/test/test.go b/exporters/metric/test/test.go index f509324630c..52c51cf80c7 100644 --- a/exporters/metric/test/test.go +++ b/exporters/metric/test/test.go @@ -17,6 +17,7 @@ package test import ( "context" "errors" + "reflect" "sync" "go.opentelemetry.io/otel/api/kv" @@ -122,3 +123,17 @@ func (p *CheckpointSet) ForEach(f func(export.Record) error) error { } return nil } + +// Takes a slice of []some.Aggregator and returns a slice of []export.Aggregator +func Unslice2(sl interface{}) (one, two export.Aggregator) { + slv := reflect.ValueOf(sl) + if slv.Type().Kind() != reflect.Slice { + panic("Invalid Unslice2") + } + if slv.Len() != 2 { + panic("Invalid Unslice2") + } + one = slv.Index(0).Addr().Interface().(export.Aggregator) + two = slv.Index(1).Addr().Interface().(export.Aggregator) + return +} diff --git a/exporters/otlp/go.sum b/exporters/otlp/go.sum index 905ac70bca2..c961a3bf694 100644 --- a/exporters/otlp/go.sum +++ b/exporters/otlp/go.sum @@ -56,6 +56,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/exporters/otlp/internal/transform/metric_test.go b/exporters/otlp/internal/transform/metric_test.go index 55c76c50225..3d596a2921e 100644 --- a/exporters/otlp/internal/transform/metric_test.go +++ b/exporters/otlp/internal/transform/metric_test.go @@ -27,6 +27,7 @@ import ( "go.opentelemetry.io/otel/api/label" "go.opentelemetry.io/otel/api/metric" "go.opentelemetry.io/otel/api/unit" + "go.opentelemetry.io/otel/exporters/metric/test" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount" sumAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" @@ -80,17 +81,18 @@ func TestStringKeyValues(t *testing.T) { } func TestMinMaxSumCountValue(t *testing.T) { - mmsc := minmaxsumcount.New(&metric.Descriptor{}) + mmsc, ckpt := test.Unslice2(minmaxsumcount.New(2, &metric.Descriptor{})) + assert.NoError(t, mmsc.Update(context.Background(), 1, &metric.Descriptor{})) assert.NoError(t, mmsc.Update(context.Background(), 10, &metric.Descriptor{})) // Prior to checkpointing ErrNoData should be returned. 
- _, _, _, _, err := minMaxSumCountValues(mmsc) + _, _, _, _, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount)) assert.EqualError(t, err, aggregation.ErrNoData.Error()) // Checkpoint to set non-zero values - mmsc.Checkpoint(&metric.Descriptor{}) - min, max, sum, count, err := minMaxSumCountValues(mmsc) + mmsc.Checkpoint(ckpt, &metric.Descriptor{}) + min, max, sum, count, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount)) if assert.NoError(t, err) { assert.Equal(t, min, metric.NewInt64Number(1)) assert.Equal(t, max, metric.NewInt64Number(10)) @@ -142,17 +144,17 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) { } ctx := context.Background() - mmsc := minmaxsumcount.New(&metric.Descriptor{}) + mmsc, ckpt := test.Unslice2(minmaxsumcount.New(2, &metric.Descriptor{})) if !assert.NoError(t, mmsc.Update(ctx, 1, &metric.Descriptor{})) { return } - mmsc.Checkpoint(&metric.Descriptor{}) + mmsc.Checkpoint(ckpt, &metric.Descriptor{}) for _, test := range tests { desc := metric.NewDescriptor(test.name, test.metricKind, test.numberKind, metric.WithDescription(test.description), metric.WithUnit(test.unit)) labels := label.NewSet(test.labels...) 
- got, err := minMaxSumCount(&desc, &labels, mmsc) + got, err := minMaxSumCount(&desc, &labels, ckpt.(aggregation.MinMaxSumCount)) if assert.NoError(t, err) { assert.Equal(t, test.expected, got.MetricDescriptor) } @@ -162,10 +164,11 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) { func TestMinMaxSumCountDatapoints(t *testing.T) { desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Int64NumberKind) labels := label.NewSet() - mmsc := minmaxsumcount.New(&desc) + mmsc, ckpt := test.Unslice2(minmaxsumcount.New(2, &desc)) + assert.NoError(t, mmsc.Update(context.Background(), 1, &desc)) assert.NoError(t, mmsc.Update(context.Background(), 10, &desc)) - mmsc.Checkpoint(&desc) + mmsc.Checkpoint(ckpt, &desc) expected := []*metricpb.SummaryDataPoint{ { Count: 2, @@ -182,7 +185,7 @@ func TestMinMaxSumCountDatapoints(t *testing.T) { }, }, } - m, err := minMaxSumCount(&desc, &labels, mmsc) + m, err := minMaxSumCount(&desc, &labels, ckpt.(aggregation.MinMaxSumCount)) if assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint(nil), m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint(nil), m.DoubleDataPoints) @@ -195,7 +198,7 @@ func TestMinMaxSumCountPropagatesErrors(t *testing.T) { // ErrNoData should be returned by both the Min and Max values of // a MinMaxSumCount Aggregator. Use this fact to check the error is // correctly returned. - mmsc := minmaxsumcount.New(&metric.Descriptor{}) + mmsc := &minmaxsumcount.New(1, &metric.Descriptor{})[0] _, _, _, _, err := minMaxSumCountValues(mmsc) assert.Error(t, err) assert.Equal(t, aggregation.ErrNoData, err) @@ -249,7 +252,7 @@ func TestSumMetricDescriptor(t *testing.T) { metric.WithUnit(test.unit), ) labels := label.NewSet(test.labels...) 
- got, err := sum(&desc, &labels, sumAgg.New()) + got, err := sum(&desc, &labels, &sumAgg.New(1)[0]) if assert.NoError(t, err) { assert.Equal(t, test.expected, got.MetricDescriptor) } @@ -259,10 +262,10 @@ func TestSumMetricDescriptor(t *testing.T) { func TestSumInt64DataPoints(t *testing.T) { desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Int64NumberKind) labels := label.NewSet() - s := sumAgg.New() + s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.Number(1), &desc)) - s.Checkpoint(&desc) - if m, err := sum(&desc, &labels, s); assert.NoError(t, err) { + s.Checkpoint(ckpt, &desc) + if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint{{Value: 1}}, m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint(nil), m.DoubleDataPoints) assert.Equal(t, []*metricpb.HistogramDataPoint(nil), m.HistogramDataPoints) @@ -273,10 +276,10 @@ func TestSumInt64DataPoints(t *testing.T) { func TestSumFloat64DataPoints(t *testing.T) { desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Float64NumberKind) labels := label.NewSet() - s := sumAgg.New() + s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.NewFloat64Number(1), &desc)) - s.Checkpoint(&desc) - if m, err := sum(&desc, &labels, s); assert.NoError(t, err) { + s.Checkpoint(ckpt, &desc) + if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint(nil), m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint{{Value: 1}}, m.DoubleDataPoints) assert.Equal(t, []*metricpb.HistogramDataPoint(nil), m.HistogramDataPoints) @@ -287,7 +290,7 @@ func TestSumFloat64DataPoints(t *testing.T) { func TestSumErrUnknownValueType(t *testing.T) { desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.NumberKind(-1)) labels := label.NewSet() - s := sumAgg.New() + s := 
&sumAgg.New(1)[0] _, err := sum(&desc, &labels, s) assert.Error(t, err) if !errors.Is(err, ErrUnknownValueType) { diff --git a/exporters/otlp/otlp_metric_test.go b/exporters/otlp/otlp_metric_test.go index 1afcfb44640..d2ae2ab47fc 100644 --- a/exporters/otlp/otlp_metric_test.go +++ b/exporters/otlp/otlp_metric_test.go @@ -29,6 +29,7 @@ import ( "go.opentelemetry.io/otel/api/kv" "go.opentelemetry.io/otel/api/label" "go.opentelemetry.io/otel/api/metric" + "go.opentelemetry.io/otel/exporters/metric/test" metricsdk "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount" @@ -635,12 +636,12 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me desc := metric.NewDescriptor(r.name, r.mKind, r.nKind, r.opts...) labs := label.NewSet(r.labels...) - var agg metricsdk.Aggregator + var agg, ckpt metricsdk.Aggregator switch r.mKind { case metric.CounterKind: - agg = sum.New() + agg, ckpt = test.Unslice2(sum.New(2)) default: - agg = minmaxsumcount.New(&desc) + agg, ckpt = test.Unslice2(minmaxsumcount.New(2, &desc)) } ctx := context.Background() @@ -657,11 +658,11 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me default: t.Fatalf("invalid number kind: %v", r.nKind) } - agg.Checkpoint(&desc) + agg.Checkpoint(ckpt, &desc) equiv := r.resource.Equivalent() resources[equiv] = r.resource - recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, r.resource, agg)) + recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, r.resource, ckpt)) } for _, records := range recs { assert.NoError(t, exp.Export(context.Background(), &checkpointSet{records: records})) diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index f4a276ffc4b..3e9cec247fb 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -52,11 
+52,19 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { require.Equal(t, kind.Zero(), min) } +func new2() (_, _ *Aggregator) { + alloc := New(2) + return &alloc[0], &alloc[1] +} + +func new4() (_, _, _, _ *Aggregator) { + alloc := New(4) + return &alloc[0], &alloc[1], &alloc[2], &alloc[3] +} + func (ut *updateTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - - alloc := New(2) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2() all := test.NewNumbers(profile.NumberKind) @@ -123,9 +131,7 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - - alloc := New(4) - agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] + agg1, agg2, ckpt1, ckpt2 := new4() all := test.NewNumbers(profile.NumberKind) @@ -149,8 +155,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - agg1.Checkpoint(ckpt1, descriptor) - agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.Checkpoint(ckpt1, descriptor) + _ = agg2.Checkpoint(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) @@ -206,8 +212,7 @@ func TestArrayMerge(t *testing.T) { func TestArrayErrors(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - alloc := New(2) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2() _, err := ckpt.Max() require.Error(t, err) @@ -228,7 +233,7 @@ func TestArrayErrors(t *testing.T) { if profile.NumberKind == metric.Float64NumberKind { test.CheckedUpdate(t, agg, metric.NewFloat64Number(math.NaN()), descriptor) } - agg.Checkpoint(ckpt, descriptor) + _ = agg.Checkpoint(ckpt, descriptor) count, err := ckpt.Count() require.Equal(t, int64(1), count, "NaN value was not counted") @@ -281,8 +286,7 @@ func TestArrayFloat64(t *testing.T) { all := test.NewNumbers(metric.Float64NumberKind) - alloc 
:= New(2) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2() for _, f := range fpsf(1) { all.Append(metric.NewFloat64Number(f)) @@ -294,7 +298,7 @@ func TestArrayFloat64(t *testing.T) { test.CheckedUpdate(t, agg, metric.NewFloat64Number(f), descriptor) } - agg.Checkpoint(ckpt, descriptor) + _ = agg.Checkpoint(ckpt, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 48a52d1979c..e69cf1e285c 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -31,6 +31,16 @@ const count = 1000 type updateTest struct { } +func new2(desc *metric.Descriptor) (_, _ *Aggregator) { + alloc := New(2, desc, NewDefaultConfig()) + return &alloc[0], &alloc[1] +} + +func new4(desc *metric.Descriptor) (_, _, _, _ *Aggregator) { + alloc := New(4, desc, NewDefaultConfig()) + return &alloc[0], &alloc[1], &alloc[2], &alloc[3] +} + func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { kind := desc.NumberKind() @@ -57,8 +67,7 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func (ut *updateTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := New(2, descriptor, NewDefaultConfig()) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2(descriptor) all := test.NewNumbers(profile.NumberKind) for i := 0; i < count; i++ { @@ -120,8 +129,7 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := New(4, descriptor, NewDefaultConfig()) - agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] + agg1, agg2, ckpt1, ckpt2 := new4(descriptor) all := test.NewNumbers(profile.NumberKind) for i := 0; i < count; i++ { diff --git a/sdk/metric/aggregator/histogram/histogram.go 
b/sdk/metric/aggregator/histogram/histogram.go index 40d60e45da9..b3daa42a845 100644 --- a/sdk/metric/aggregator/histogram/histogram.go +++ b/sdk/metric/aggregator/histogram/histogram.go @@ -37,7 +37,7 @@ type ( lock sync.Mutex boundaries []float64 kind metric.NumberKind - state + state state } // state represents the state of a histogram, consisting of @@ -90,19 +90,19 @@ func (c *Aggregator) Kind() aggregation.Kind { // Sum returns the sum of all values in the checkpoint. func (c *Aggregator) Sum() (metric.Number, error) { - return c.sum, nil + return c.state.sum, nil } // Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { - return int64(c.count), nil + return int64(c.state.count), nil } // Histogram returns the count of events in pre-determined buckets. func (c *Aggregator) Histogram() (aggregation.Buckets, error) { return aggregation.Buckets{ Boundaries: c.boundaries, - Counts: c.bucketCounts, + Counts: c.state.bucketCounts, }, nil } @@ -155,9 +155,9 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri c.lock.Lock() defer c.lock.Unlock() - c.count.AddInt64(1) - c.sum.AddNumber(kind, number) - c.bucketCounts[bucketID]++ + c.state.count.AddInt64(1) + c.state.sum.AddNumber(kind, number) + c.state.bucketCounts[bucketID]++ return nil } @@ -169,11 +169,11 @@ func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error return aggregator.NewInconsistentAggregatorError(c, oa) } - c.sum.AddNumber(desc.NumberKind(), o.sum) - c.count.AddNumber(metric.Uint64NumberKind, o.count) + c.state.sum.AddNumber(desc.NumberKind(), o.state.sum) + c.state.count.AddNumber(metric.Uint64NumberKind, o.state.count) - for i := 0; i < len(c.bucketCounts); i++ { - c.bucketCounts[i] += o.bucketCounts[i] + for i := 0; i < len(c.state.bucketCounts); i++ { + c.state.bucketCounts[i] += o.state.bucketCounts[i] } return nil } diff --git a/sdk/metric/aggregator/histogram/histogram_test.go 
b/sdk/metric/aggregator/histogram/histogram_test.go index bfe2d32407e..c3044815f7a 100644 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ b/sdk/metric/aggregator/histogram/histogram_test.go @@ -60,6 +60,16 @@ var ( boundaries = []float64{500, 250, 750} ) +func new2(desc *metric.Descriptor) (_, _ *histogram.Aggregator) { + alloc := histogram.New(2, desc, boundaries) + return &alloc[0], &alloc[1] +} + +func new4(desc *metric.Descriptor) (_, _, _, _ *histogram.Aggregator) { + alloc := histogram.New(4, desc, boundaries) + return &alloc[0], &alloc[1], &alloc[2], &alloc[3] +} + func checkZero(t *testing.T, agg *histogram.Aggregator, desc *metric.Descriptor) { asum, err := agg.Sum() require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") @@ -101,8 +111,7 @@ func TestHistogramPositiveAndNegative(t *testing.T) { func testHistogram(t *testing.T, profile test.Profile, policy policy) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := histogram.New(2, descriptor, boundaries) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2(descriptor) all := test.NewNumbers(profile.NumberKind) @@ -112,7 +121,7 @@ func testHistogram(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - agg.Checkpoint(ckpt, descriptor) + _ = agg.Checkpoint(ckpt, descriptor) checkZero(t, agg, descriptor) @@ -147,7 +156,7 @@ func TestHistogramInitial(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - agg := histogram.New(1, descriptor, boundaries)[0] + agg := &histogram.New(1, descriptor, boundaries)[0] buckets, err := agg.Histogram() require.NoError(t, err) @@ -160,8 +169,7 @@ func TestHistogramMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := 
histogram.New(4, descriptor, boundaries) - agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] + agg1, agg2, ckpt1, ckpt2 := new4(descriptor) all := test.NewNumbers(profile.NumberKind) @@ -176,8 +184,8 @@ func TestHistogramMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - agg1.Checkpoint(ckpt1, descriptor) - agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.Checkpoint(ckpt1, descriptor) + _ = agg2.Checkpoint(ckpt2, descriptor) test.CheckedMerge(t, ckpt1, ckpt2, descriptor) @@ -213,8 +221,7 @@ func TestHistogramNotSet(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := histogram.New(2, descriptor, boundaries) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2(descriptor) err := agg.Checkpoint(ckpt, descriptor) require.NoError(t, err) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go index ae5ad25f27b..0eadf224f0b 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue_test.go @@ -50,6 +50,16 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func new2() (_, _ *Aggregator) { + alloc := New(2) + return &alloc[0], &alloc[1] +} + +func new4() (_, _, _, _ *Aggregator) { + alloc := New(4) + return &alloc[0], &alloc[1], &alloc[2], &alloc[3] +} + func checkZero(t *testing.T, agg *Aggregator) { lv, ts, err := agg.LastValue() require.True(t, errors.Is(err, aggregation.ErrNoData)) @@ -59,8 +69,7 @@ func checkZero(t *testing.T, agg *Aggregator) { func TestLastValueUpdate(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - alloc := New(2) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2() record := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind) @@ -82,8 +91,7 @@ func TestLastValueUpdate(t *testing.T) { func TestLastValueMerge(t *testing.T) { 
test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - alloc := New(4) - agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] + agg1, agg2, ckpt1, ckpt2 := new4() descriptor := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind) @@ -118,9 +126,8 @@ func TestLastValueMerge(t *testing.T) { func TestLastValueNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueObserverKind, metric.Int64NumberKind) - g := &New(1)[0] - ckpt := &New(1)[0] - g.Checkpoint(ckpt, descriptor) + g, ckpt := new2() + _ = g.Checkpoint(ckpt, descriptor) checkZero(t, g) } diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go index 94cf7f5321f..55a3bc3d72c 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go @@ -76,6 +76,16 @@ func TestMinMaxSumCountPositiveAndNegative(t *testing.T) { }) } +func new2(desc *metric.Descriptor) (_, _ *Aggregator) { + alloc := New(2, desc) + return &alloc[0], &alloc[1] +} + +func new4(desc *metric.Descriptor) (_, _, _, _ *Aggregator) { + alloc := New(4, desc) + return &alloc[0], &alloc[1], &alloc[2], &alloc[3] +} + func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { kind := desc.NumberKind() @@ -100,8 +110,7 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := New(2, descriptor) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2(descriptor) all := test.NewNumbers(profile.NumberKind) @@ -111,7 +120,7 @@ func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - agg.Checkpoint(ckpt, descriptor) + _ = agg.Checkpoint(ckpt, descriptor) checkZero(t, agg, descriptor) @@ -149,8 +158,7 @@ func TestMinMaxSumCountMerge(t 
*testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) - alloc := New(4, descriptor) - agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] + agg1, agg2, ckpt1, ckpt2 := new4(descriptor) all := test.NewNumbers(profile.NumberKind) @@ -165,8 +173,8 @@ func TestMinMaxSumCountMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - agg1.Checkpoint(ckpt1, descriptor) - agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.Checkpoint(ckpt1, descriptor) + _ = agg2.Checkpoint(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) @@ -211,7 +219,7 @@ func TestMaxSumCountNotSet(t *testing.T) { alloc := New(2, descriptor) agg, ckpt := &alloc[0], &alloc[1] - agg.Checkpoint(ckpt, descriptor) + _ = agg.Checkpoint(ckpt, descriptor) asum, err := ckpt.Sum() require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index 81f1c43c19d..b7bd6e655e5 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -43,6 +43,16 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func new2() (_, _ *Aggregator) { + alloc := New(2) + return &alloc[0], &alloc[1] +} + +func new4() (_, _, _, _ *Aggregator) { + alloc := New(4) + return &alloc[0], &alloc[1], &alloc[2], &alloc[3] +} + func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { kind := desc.NumberKind() @@ -53,8 +63,7 @@ func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) { func TestCounterSum(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - alloc := New(2) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2() descriptor := test.NewAggregatorTest(metric.CounterKind, profile.NumberKind) @@ -78,8 +87,7 @@ func TestCounterSum(t *testing.T) { func TestValueRecorderSum(t *testing.T) { 
test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - alloc := New(2) - agg, ckpt := &alloc[0], &alloc[1] + agg, ckpt := new2() descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind) @@ -94,7 +102,7 @@ func TestValueRecorderSum(t *testing.T) { sum.AddNumber(profile.NumberKind, r2) } - agg.Checkpoint(ckpt, descriptor) + _ = agg.Checkpoint(ckpt, descriptor) checkZero(t, agg, descriptor) asum, err := ckpt.Sum() @@ -105,8 +113,7 @@ func TestValueRecorderSum(t *testing.T) { func TestCounterMerge(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - alloc := New(4) - agg1, agg2, ckpt1, ckpt2 := &alloc[0], &alloc[1], &alloc[2], &alloc[3] + agg1, agg2, ckpt1, ckpt2 := new4() descriptor := test.NewAggregatorTest(metric.CounterKind, profile.NumberKind) @@ -118,8 +125,8 @@ func TestCounterMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - agg1.Checkpoint(ckpt1, descriptor) - agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.Checkpoint(ckpt1, descriptor) + _ = agg2.Checkpoint(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) diff --git a/sdk/metric/histogram_stress_test.go b/sdk/metric/histogram_stress_test.go index 0526a8b3d82..13f7625dd1e 100644 --- a/sdk/metric/histogram_stress_test.go +++ b/sdk/metric/histogram_stress_test.go @@ -46,7 +46,7 @@ func TestStressInt64Histogram(t *testing.T) { startTime := time.Now() for time.Since(startTime) < time.Second { - h.Checkpoint(ckpt, &desc) + _ = h.Checkpoint(ckpt, &desc) b, _ := ckpt.Histogram() c, _ := ckpt.Count() diff --git a/sdk/metric/minmaxsumcount_stress_test.go b/sdk/metric/minmaxsumcount_stress_test.go index c2b175048ca..3419808ebb3 100644 --- a/sdk/metric/minmaxsumcount_stress_test.go +++ b/sdk/metric/minmaxsumcount_stress_test.go @@ -47,7 +47,7 @@ func TestStressInt64MinMaxSumCount(t *testing.T) { startTime := time.Now() for time.Since(startTime) < time.Second { - mmsc.Checkpoint(ckpt, &desc) + _ = 
mmsc.Checkpoint(ckpt, &desc) s, _ := ckpt.Sum() c, _ := ckpt.Count() diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 54d0f0a10d0..3b312644c26 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -438,10 +438,14 @@ func (m *Accumulator) checkpointRecord(r *record) int { if r.current == nil { return 0 } - r.current.Checkpoint(r.checkpoint, &r.inst.descriptor) + err := r.current.Checkpoint(r.checkpoint, &r.inst.descriptor) + if err != nil { + global.Handle(err) + return 0 + } exportRecord := export.NewRecord(&r.inst.descriptor, r.labels, m.resource, r.checkpoint) - err := m.integrator.Process(exportRecord) + err = m.integrator.Process(exportRecord) if err != nil { global.Handle(err) } @@ -463,7 +467,7 @@ func (m *Accumulator) checkpointAsync(a *asyncInstrument) int { if err != nil { global.Handle(err) } - checkpointed += 1 + checkpointed++ } } else if epochDiff > 1 { // This is second collection cycle with no From 6d13333cda87ba1a92ca41f6fc30cb9f9e840fc3 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 22:47:57 -0700 Subject: [PATCH 13/21] Tests & lint passing --- exporters/otlp/go.sum | 1 - exporters/otlp/internal/transform/metric_test.go | 10 +++++----- exporters/otlp/otlp_metric_test.go | 2 +- go.sum | 1 - 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/exporters/otlp/go.sum b/exporters/otlp/go.sum index c961a3bf694..905ac70bca2 100644 --- a/exporters/otlp/go.sum +++ b/exporters/otlp/go.sum @@ -56,7 +56,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/exporters/otlp/internal/transform/metric_test.go b/exporters/otlp/internal/transform/metric_test.go index 3d596a2921e..d7ed7dd818d 100644 --- a/exporters/otlp/internal/transform/metric_test.go +++ b/exporters/otlp/internal/transform/metric_test.go @@ -91,7 +91,7 @@ func TestMinMaxSumCountValue(t *testing.T) { assert.EqualError(t, err, aggregation.ErrNoData.Error()) // Checkpoint to set non-zero values - mmsc.Checkpoint(ckpt, &metric.Descriptor{}) + _ = mmsc.Checkpoint(ckpt, &metric.Descriptor{}) min, max, sum, count, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount)) if assert.NoError(t, err) { assert.Equal(t, min, metric.NewInt64Number(1)) @@ -148,7 +148,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) { if !assert.NoError(t, mmsc.Update(ctx, 1, &metric.Descriptor{})) { return } - mmsc.Checkpoint(ckpt, &metric.Descriptor{}) + _ = mmsc.Checkpoint(ckpt, &metric.Descriptor{}) for _, test := range tests { desc := metric.NewDescriptor(test.name, test.metricKind, test.numberKind, metric.WithDescription(test.description), @@ -168,7 +168,7 @@ func TestMinMaxSumCountDatapoints(t *testing.T) { assert.NoError(t, mmsc.Update(context.Background(), 1, &desc)) assert.NoError(t, mmsc.Update(context.Background(), 10, &desc)) - mmsc.Checkpoint(ckpt, &desc) + _ = mmsc.Checkpoint(ckpt, &desc) expected := []*metricpb.SummaryDataPoint{ { Count: 2, @@ -264,7 +264,7 @@ func TestSumInt64DataPoints(t *testing.T) { labels := label.NewSet() s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.Number(1), &desc)) - s.Checkpoint(ckpt, &desc) + _ = s.Checkpoint(ckpt, &desc) if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint{{Value: 
1}}, m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint(nil), m.DoubleDataPoints) @@ -278,7 +278,7 @@ func TestSumFloat64DataPoints(t *testing.T) { labels := label.NewSet() s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.NewFloat64Number(1), &desc)) - s.Checkpoint(ckpt, &desc) + _ = s.Checkpoint(ckpt, &desc) if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint(nil), m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint{{Value: 1}}, m.DoubleDataPoints) diff --git a/exporters/otlp/otlp_metric_test.go b/exporters/otlp/otlp_metric_test.go index d2ae2ab47fc..03f8cedae71 100644 --- a/exporters/otlp/otlp_metric_test.go +++ b/exporters/otlp/otlp_metric_test.go @@ -658,7 +658,7 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me default: t.Fatalf("invalid number kind: %v", r.nKind) } - agg.Checkpoint(ckpt, &desc) + _ = agg.Checkpoint(ckpt, &desc) equiv := r.resource.Equivalent() resources[equiv] = r.resource diff --git a/go.sum b/go.sum index a6a0897d988..613963bb6ab 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= From e5996397680f4d196845e8aef1e47089ac969778 Mon Sep 17 
00:00:00 2001 From: jmacd Date: Wed, 10 Jun 2020 23:46:06 -0700 Subject: [PATCH 14/21] Missing file --- exporters/metric/test/test_test.go | 50 ++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 exporters/metric/test/test_test.go diff --git a/exporters/metric/test/test_test.go b/exporters/metric/test/test_test.go new file mode 100644 index 00000000000..42935596c7f --- /dev/null +++ b/exporters/metric/test/test_test.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/api/metric" + export "go.opentelemetry.io/otel/sdk/export/metric" +) + +type testAgg struct{} + +var _ export.Aggregator = (*testAgg)(nil) + +func (ta *testAgg) Update(context.Context, metric.Number, *metric.Descriptor) error { + return nil +} + +func (ta *testAgg) Checkpoint(export.Aggregator, *metric.Descriptor) error { + return nil +} + +func (ta *testAgg) Merge(export.Aggregator, *metric.Descriptor) error { + return nil +} + +func TestUnslice(t *testing.T) { + in := make([]testAgg, 2) + + a, b := Unslice2(in) + + require.Equal(t, a.(*testAgg), &in[0]) + require.Equal(t, b.(*testAgg), &in[1]) +} From 6a65f9f9166db077d8643284ffeac160210c88dc Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 11 Jun 2020 00:01:20 -0700 Subject: [PATCH 15/21] Remove dead test code --- exporters/metric/test/test.go | 64 ++++++++--------------- exporters/metric/test/test_test.go | 26 ++------- sdk/metric/aggregator/array/array_test.go | 3 +- 3 files changed, 27 insertions(+), 66 deletions(-) diff --git a/exporters/metric/test/test.go b/exporters/metric/test/test.go index 52c51cf80c7..79bdaa6dc8e 100644 --- a/exporters/metric/test/test.go +++ b/exporters/metric/test/test.go @@ -25,10 +25,6 @@ import ( "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregation" - "go.opentelemetry.io/otel/sdk/metric/aggregator/array" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" "go.opentelemetry.io/otel/sdk/resource" ) @@ -37,6 +33,7 @@ type mapkey struct { distinct label.Distinct } +// CheckpointSet is useful for testing Exporters. 
type CheckpointSet struct { sync.RWMutex records map[mapkey]export.Record @@ -44,6 +41,26 @@ type CheckpointSet struct { resource *resource.Resource } +// NoopAggregator is useful for testing Exporters. +type NoopAggregator struct{} + +var _ export.Aggregator = (*NoopAggregator)(nil) + +// Update implements export.Aggregator. +func (*NoopAggregator) Update(context.Context, metric.Number, *metric.Descriptor) error { + return nil +} + +// Checkpoint implements export.Aggregator. +func (*NoopAggregator) Checkpoint(export.Aggregator, *metric.Descriptor) error { + return nil +} + +// Merge implements export.Aggregator. +func (*NoopAggregator) Merge(export.Aggregator, *metric.Descriptor) error { + return nil +} + // NewCheckpointSet returns a test CheckpointSet that new records could be added. // Records are grouped by their encoded labels. func NewCheckpointSet(resource *resource.Resource) *CheckpointSet { @@ -53,12 +70,13 @@ func NewCheckpointSet(resource *resource.Resource) *CheckpointSet { } } +// Reset clears the Aggregator state. func (p *CheckpointSet) Reset() { p.records = make(map[mapkey]export.Record) p.updates = nil } -// Add a new descriptor to a Checkpoint. +// Add a new record to a CheckpointSet. // // If there is an existing record with the same descriptor and labels, // the stored aggregator will be returned and should be merged. @@ -79,42 +97,6 @@ func (p *CheckpointSet) Add(desc *metric.Descriptor, newAgg export.Aggregator, l return newAgg, true } -func createNumber(desc *metric.Descriptor, v float64) metric.Number { - if desc.NumberKind() == metric.Float64NumberKind { - return metric.NewFloat64Number(v) - } - return metric.NewInt64Number(int64(v)) -} - -func (p *CheckpointSet) AddLastValue(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, &lastvalue.New(1)[0], v, labels...) 
-} - -func (p *CheckpointSet) AddCounter(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, &sum.New(1)[0], v, labels...) -} - -func (p *CheckpointSet) AddValueRecorder(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, &array.New(1)[0], v, labels...) -} - -func (p *CheckpointSet) AddHistogramValueRecorder(desc *metric.Descriptor, boundaries []float64, v float64, labels ...kv.KeyValue) { - p.updateAggregator(desc, &histogram.New(1, desc, boundaries)[0], v, labels...) -} - -func (p *CheckpointSet) updateAggregator(desc *metric.Descriptor, newAgg export.Aggregator, v float64, labels ...kv.KeyValue) { - ctx := context.Background() - // Updates and checkpoint the new aggregator - _ = newAgg.Update(ctx, createNumber(desc, v), desc) - - // Try to add this aggregator to the CheckpointSet - agg, added := p.Add(desc, newAgg, labels...) - if !added { - // An aggregator already exist for this descriptor and label set, we should merge them. 
- _ = agg.Merge(newAgg, desc) - } -} - func (p *CheckpointSet) ForEach(f func(export.Record) error) error { for _, r := range p.updates { if err := f(r); err != nil && !errors.Is(err, aggregation.ErrNoData) { diff --git a/exporters/metric/test/test_test.go b/exporters/metric/test/test_test.go index 42935596c7f..711c6ab4719 100644 --- a/exporters/metric/test/test_test.go +++ b/exporters/metric/test/test_test.go @@ -15,36 +15,16 @@ package test import ( - "context" "testing" "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/api/metric" - export "go.opentelemetry.io/otel/sdk/export/metric" ) -type testAgg struct{} - -var _ export.Aggregator = (*testAgg)(nil) - -func (ta *testAgg) Update(context.Context, metric.Number, *metric.Descriptor) error { - return nil -} - -func (ta *testAgg) Checkpoint(export.Aggregator, *metric.Descriptor) error { - return nil -} - -func (ta *testAgg) Merge(export.Aggregator, *metric.Descriptor) error { - return nil -} - func TestUnslice(t *testing.T) { - in := make([]testAgg, 2) + in := make([]NoopAggregator, 2) a, b := Unslice2(in) - require.Equal(t, a.(*testAgg), &in[0]) - require.Equal(t, b.(*testAgg), &in[1]) + require.Equal(t, a.(*NoopAggregator), &in[0]) + require.Equal(t, b.(*NoopAggregator), &in[1]) } diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 3e9cec247fb..ad52d15a294 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -15,12 +15,11 @@ package array import ( + "errors" "fmt" "math" "testing" - "errors" - "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/api/metric" From 6b4bfd7f3ad2704bb76b59defc1c41d4da4e95a1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 11 Jun 2020 00:28:24 -0700 Subject: [PATCH 16/21] Cleanups --- sdk/export/metric/metric.go | 39 ++++++++++++++++------------- sdk/metric/aggregator/aggregator.go | 2 +- sdk/metric/sdk.go | 14 ----------- 3 files changed, 22 
insertions(+), 33 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 4b4b65fdbb4..e4a53c28267 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -75,8 +75,8 @@ type Integrator interface { type AggregationSelector interface { // AggregatorFor allocates a variable number of aggregators of // a kind suitable for the requested export. This method - // supports a variable-number of allocations to support making - // a single allocation. + // initializes a `...*Aggregator`, to support making a single + // allocation. // // When the call returns without initializing the *Aggregator // to a non-nil value, the metric instrument is explicitly @@ -84,7 +84,8 @@ type AggregationSelector interface { // // This must return a consistent type to avoid confusion in // later stages of the metrics export process, i.e., when - // Merging multiple aggregators for a specific instrument. + // Merging or Checkpointing aggregators for a specific + // instrument. // // Note: This is context-free because the aggregator should // not relate to the incoming context. This call should not @@ -105,37 +106,39 @@ type AggregationSelector interface { // MinMaxSumCount aggregator to a Counter instrument. type Aggregator interface { // Update receives a new measured value and incorporates it - // into the aggregation. Update() calls may arrive - // concurrently as the SDK does not provide synchronization. + // into the aggregation. Update() calls may be called + // concurrently. // // Descriptor.NumberKind() should be consulted to determine // whether the provided number is an int64 or float64. // // The Context argument comes from user-level code and could be - // inspected for distributed or span context. + // inspected for a `correlation.Map` or `trace.SpanContext`. 
Update(context.Context, metric.Number, *metric.Descriptor) error // Checkpoint is called during collection to finish one period - // of aggregation by atomically saving the current value into - // the argument. + // of aggregation by atomically saving the current state into + // the argument Aggregator. // - // Checkpoint() is called concurrently with Update(). - // Checkpoint should reset the current state to the empty - // state, in order to begin computing a new delta for the next - // collection period. + // Checkpoint() is called concurrently with Update(). These + // two methods must be synchronized with respect to each + // other, for correctness. // - // After the checkpoint is taken, the current value may be - // accessed using by converting to one a suitable interface - // types in the `aggregator` sub-package. + // The checkpointed Aggregator can be converted into one of + // the interfaces in the `aggregator` sub-package, according + // to kind of Aggregator that was selected. // // This call has no Context argument because it is expected to // perform only computation. Checkpoint(Aggregator, *metric.Descriptor) error // Merge combines the checkpointed state from the argument - // aggregator into this aggregator's checkpointed state. - // Merge() is called in a single-threaded context, no locking - // is required. + // Aggregator into this Aggregator. Merge is not synchronized + // with respect to Update, and should only be called on + // checkpoiunted Aggregators. + // + // The owner of a checkpointed + // Aggregator is responsible for synchronization. Merge(Aggregator, *metric.Descriptor) error } diff --git a/sdk/metric/aggregator/aggregator.go b/sdk/metric/aggregator/aggregator.go index bccc884417c..f0d30ba5a76 100644 --- a/sdk/metric/aggregator/aggregator.go +++ b/sdk/metric/aggregator/aggregator.go @@ -24,7 +24,7 @@ import ( ) // NewInconsistentAggregatorError formats an error describing an attempt to -// merge different-type aggregators. 
The result can be unwrapped as +// Checkpoint or Merge different-type aggregators. The result can be unwrapped as // an ErrInconsistentType. func NewInconsistentAggregatorError(a1, a2 export.Aggregator) error { return fmt.Errorf("%w: %T and %T", aggregation.ErrInconsistentType, a1, a2) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 3b312644c26..8bc6933a41e 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -482,20 +482,6 @@ func (m *Accumulator) checkpointAsync(a *asyncInstrument) int { return checkpointed } -// func (m *Accumulator) checkpoint(descriptor *metric.Descriptor, recorder export.Aggregator, labels *label.Set) int { -// if recorder == nil { -// return 0 -// } -// recorder.Checkpoint(descriptor) - -// exportRecord := export.NewRecord(descriptor, labels, m.resource, recorder) -// err := m.integrator.Process(exportRecord) -// if err != nil { -// global.Handle(err) -// } -// return 1 -// } - // RecordBatch enters a batch of metric events. func (m *Accumulator) RecordBatch(ctx context.Context, kvs []kv.KeyValue, measurements ...api.Measurement) { // Labels will be computed the first time acquireHandle is From 0516fc4200db9406d9c5f23e03f1ee2e77202f18 Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 12 Jun 2020 10:05:54 -0700 Subject: [PATCH 17/21] Checkpoint->SynchronizedCopy --- exporters/metric/stdout/stdout_test.go | 16 ++++++++-------- exporters/metric/test/test.go | 2 +- exporters/otlp/internal/transform/metric_test.go | 10 +++++----- exporters/otlp/otlp_metric_test.go | 2 +- sdk/export/metric/metric.go | 12 ++++++------ sdk/metric/aggregator/array/array.go | 6 +++--- sdk/metric/aggregator/array/array_test.go | 10 +++++----- sdk/metric/aggregator/ddsketch/ddsketch.go | 4 ++-- sdk/metric/aggregator/ddsketch/ddsketch_test.go | 6 +++--- sdk/metric/aggregator/histogram/histogram.go | 2 +- .../aggregator/histogram/histogram_test.go | 8 ++++---- sdk/metric/aggregator/lastvalue/lastvalue.go | 2 +- .../aggregator/lastvalue/lastvalue_test.go | 8 
++++---- sdk/metric/aggregator/minmaxsumcount/mmsc.go | 4 ++-- .../aggregator/minmaxsumcount/mmsc_test.go | 8 ++++---- sdk/metric/aggregator/sum/sum.go | 2 +- sdk/metric/aggregator/sum/sum_test.go | 8 ++++---- sdk/metric/histogram_stress_test.go | 2 +- sdk/metric/integrator/simple/simple_test.go | 4 ++-- sdk/metric/minmaxsumcount_stress_test.go | 2 +- sdk/metric/sdk.go | 2 +- 21 files changed, 60 insertions(+), 60 deletions(-) diff --git a/exporters/metric/stdout/stdout_test.go b/exporters/metric/stdout/stdout_test.go index 7dc241abbc9..e1556c8ca75 100644 --- a/exporters/metric/stdout/stdout_test.go +++ b/exporters/metric/stdout/stdout_test.go @@ -104,7 +104,7 @@ func TestStdoutTimestamp(t *testing.T) { lvagg, ckpt := test.Unslice2(lastvalue.New(2)) aggtest.CheckedUpdate(t, lvagg, metric.NewInt64Number(321), &desc) - _ = lvagg.Checkpoint(ckpt, &desc) + _ = lvagg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt) @@ -151,7 +151,7 @@ func TestStdoutCounterFormat(t *testing.T) { cagg, ckpt := test.Unslice2(sum.New(2)) aggtest.CheckedUpdate(fix.t, cagg, metric.NewInt64Number(123), &desc) - _ = cagg.Checkpoint(ckpt, &desc) + _ = cagg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -169,7 +169,7 @@ func TestStdoutLastValueFormat(t *testing.T) { lvagg, ckpt := test.Unslice2(lastvalue.New(2)) aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc) - _ = lvagg.Checkpoint(ckpt, &desc) + _ = lvagg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -189,7 +189,7 @@ func TestStdoutMinMaxSumCount(t *testing.T) { aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(123.456), &desc) aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(876.543), &desc) - _ = magg.Checkpoint(ckpt, &desc) + _ = magg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -212,7 +212,7 @@ func 
TestStdoutValueRecorderFormat(t *testing.T) { aggtest.CheckedUpdate(fix.t, aagg, metric.NewFloat64Number(float64(i)+0.5), &desc) } - _ = aagg.Checkpoint(ckpt, &desc) + _ = aagg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -256,7 +256,7 @@ func TestStdoutNoData(t *testing.T) { checkpointSet := test.NewCheckpointSet(testResource) - _ = agg.Checkpoint(ckpt, &desc) + _ = agg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt) @@ -278,7 +278,7 @@ func TestStdoutLastValueNotSet(t *testing.T) { desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind) lvagg, ckpt := test.Unslice2(lastvalue.New(2)) - _ = lvagg.Checkpoint(ckpt, &desc) + _ = lvagg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, lvagg, kv.String("A", "B"), kv.String("C", "D")) @@ -330,7 +330,7 @@ func TestStdoutResource(t *testing.T) { lvagg, ckpt := test.Unslice2(lastvalue.New(2)) aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc) - _ = lvagg.Checkpoint(ckpt, &desc) + _ = lvagg.SynchronizedCopy(ckpt, &desc) checkpointSet.Add(&desc, ckpt, tc.attrs...) diff --git a/exporters/metric/test/test.go b/exporters/metric/test/test.go index 79bdaa6dc8e..d196e511786 100644 --- a/exporters/metric/test/test.go +++ b/exporters/metric/test/test.go @@ -52,7 +52,7 @@ func (*NoopAggregator) Update(context.Context, metric.Number, *metric.Descriptor } // Checkpoint implements export.Aggregator. 
-func (*NoopAggregator) Checkpoint(export.Aggregator, *metric.Descriptor) error { +func (*NoopAggregator) SynchronizedCopy(export.Aggregator, *metric.Descriptor) error { return nil } diff --git a/exporters/otlp/internal/transform/metric_test.go b/exporters/otlp/internal/transform/metric_test.go index d7ed7dd818d..02d275d2b5b 100644 --- a/exporters/otlp/internal/transform/metric_test.go +++ b/exporters/otlp/internal/transform/metric_test.go @@ -91,7 +91,7 @@ func TestMinMaxSumCountValue(t *testing.T) { assert.EqualError(t, err, aggregation.ErrNoData.Error()) // Checkpoint to set non-zero values - _ = mmsc.Checkpoint(ckpt, &metric.Descriptor{}) + _ = mmsc.SynchronizedCopy(ckpt, &metric.Descriptor{}) min, max, sum, count, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount)) if assert.NoError(t, err) { assert.Equal(t, min, metric.NewInt64Number(1)) @@ -148,7 +148,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) { if !assert.NoError(t, mmsc.Update(ctx, 1, &metric.Descriptor{})) { return } - _ = mmsc.Checkpoint(ckpt, &metric.Descriptor{}) + _ = mmsc.SynchronizedCopy(ckpt, &metric.Descriptor{}) for _, test := range tests { desc := metric.NewDescriptor(test.name, test.metricKind, test.numberKind, metric.WithDescription(test.description), @@ -168,7 +168,7 @@ func TestMinMaxSumCountDatapoints(t *testing.T) { assert.NoError(t, mmsc.Update(context.Background(), 1, &desc)) assert.NoError(t, mmsc.Update(context.Background(), 10, &desc)) - _ = mmsc.Checkpoint(ckpt, &desc) + _ = mmsc.SynchronizedCopy(ckpt, &desc) expected := []*metricpb.SummaryDataPoint{ { Count: 2, @@ -264,7 +264,7 @@ func TestSumInt64DataPoints(t *testing.T) { labels := label.NewSet() s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.Number(1), &desc)) - _ = s.Checkpoint(ckpt, &desc) + _ = s.SynchronizedCopy(ckpt, &desc) if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, 
[]*metricpb.Int64DataPoint{{Value: 1}}, m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint(nil), m.DoubleDataPoints) @@ -278,7 +278,7 @@ func TestSumFloat64DataPoints(t *testing.T) { labels := label.NewSet() s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.NewFloat64Number(1), &desc)) - _ = s.Checkpoint(ckpt, &desc) + _ = s.SynchronizedCopy(ckpt, &desc) if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint(nil), m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint{{Value: 1}}, m.DoubleDataPoints) diff --git a/exporters/otlp/otlp_metric_test.go b/exporters/otlp/otlp_metric_test.go index 03f8cedae71..b1b873ef9b0 100644 --- a/exporters/otlp/otlp_metric_test.go +++ b/exporters/otlp/otlp_metric_test.go @@ -658,7 +658,7 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me default: t.Fatalf("invalid number kind: %v", r.nKind) } - _ = agg.Checkpoint(ckpt, &desc) + _ = agg.SynchronizedCopy(ckpt, &desc) equiv := r.resource.Equivalent() resources[equiv] = r.resource diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index e4a53c28267..6488b7391d9 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -116,21 +116,21 @@ type Aggregator interface { // inspected for a `correlation.Map` or `trace.SpanContext`. Update(context.Context, metric.Number, *metric.Descriptor) error - // Checkpoint is called during collection to finish one period + // SynchronizedCopy is called during collection to finish one period // of aggregation by atomically saving the current state into // the argument Aggregator. // - // Checkpoint() is called concurrently with Update(). These + // SynchronizedCopy() is called concurrently with Update(). These // two methods must be synchronized with respect to each // other, for correctness. 
// - // The checkpointed Aggregator can be converted into one of - // the interfaces in the `aggregator` sub-package, according - // to kind of Aggregator that was selected. + // After saving a synchronized copy, the Aggregator can be converted + // into one or more of the interfaces in the `aggregation` sub-package, + // according to kind of Aggregator that was selected. // // This call has no Context argument because it is expected to // perform only computation. - Checkpoint(Aggregator, *metric.Descriptor) error + SynchronizedCopy(Aggregator, *metric.Descriptor) error // Merge combines the checkpointed state from the argument // Aggregator into this Aggregator. Merge is not synchronized diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 0c19e7fbe40..534aae7a7da 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -46,7 +46,7 @@ var _ aggregation.Points = &Aggregator{} // New returns a new array aggregator, which aggregates recorded // measurements by storing them in an array. This type uses a mutex -// for Update() and Checkpoint() concurrency. +// for Update() and SynchronizedCopy() concurrency. func New(cnt int) []Aggregator { return make([]Aggregator, cnt) } @@ -89,7 +89,7 @@ func (c *Aggregator) Points() ([]metric.Number, error) { // Checkpoint saves the current state and resets the current state to // the empty set, taking a lock to prevent concurrent Update() calls. -func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) @@ -109,7 +109,7 @@ func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) e } // Update adds the recorded measurement to the current data set. 
-// Update takes a lock to prevent concurrent Update() and Checkpoint() +// Update takes a lock to prevent concurrent Update() and SynchronizedCopy() // calls. func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error { c.lock.Lock() diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index ad52d15a294..2f94d34db15 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -77,7 +77,7 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { test.CheckedUpdate(t, agg, y, descriptor) } - err := agg.Checkpoint(ckpt, descriptor) + err := agg.SynchronizedCopy(ckpt, descriptor) require.NoError(t, err) checkZero(t, agg, descriptor) @@ -154,8 +154,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - _ = agg1.Checkpoint(ckpt1, descriptor) - _ = agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.SynchronizedCopy(ckpt1, descriptor) + _ = agg2.SynchronizedCopy(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) @@ -232,7 +232,7 @@ func TestArrayErrors(t *testing.T) { if profile.NumberKind == metric.Float64NumberKind { test.CheckedUpdate(t, agg, metric.NewFloat64Number(math.NaN()), descriptor) } - _ = agg.Checkpoint(ckpt, descriptor) + _ = agg.SynchronizedCopy(ckpt, descriptor) count, err := ckpt.Count() require.Equal(t, int64(1), count, "NaN value was not counted") @@ -297,7 +297,7 @@ func TestArrayFloat64(t *testing.T) { test.CheckedUpdate(t, agg, metric.NewFloat64Number(f), descriptor) } - _ = agg.Checkpoint(ckpt, descriptor) + _ = agg.SynchronizedCopy(ckpt, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 8321acd0d51..5e737444745 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -106,7 +106,7 @@ func (c *Aggregator) toNumber(f float64) metric.Number 
{ // Checkpoint saves the current state and resets the current state to // a new sketch, taking a lock to prevent concurrent Update() calls. -func (c *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) error { +func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, _ *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) @@ -121,7 +121,7 @@ func (c *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) erro } // Update adds the recorded measurement to the current data set. -// Update takes a lock to prevent concurrent Update() and Checkpoint() +// Update takes a lock to prevent concurrent Update() and SynchronizedCopy() // calls. func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error { c.lock.Lock() diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index e69cf1e285c..6293817c00b 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -80,7 +80,7 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { test.CheckedUpdate(t, agg, y, descriptor) } - err := agg.Checkpoint(ckpt, descriptor) + err := agg.SynchronizedCopy(ckpt, descriptor) require.NoError(t, err) checkZero(t, agg, descriptor) @@ -156,8 +156,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - _ = agg1.Checkpoint(ckpt1, descriptor) - _ = agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.SynchronizedCopy(ckpt1, descriptor) + _ = agg2.SynchronizedCopy(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg1, descriptor) diff --git a/sdk/metric/aggregator/histogram/histogram.go b/sdk/metric/aggregator/histogram/histogram.go index b3daa42a845..0a9697426d8 100644 --- a/sdk/metric/aggregator/histogram/histogram.go +++ b/sdk/metric/aggregator/histogram/histogram.go @@ -110,7 +110,7 @@ func (c *Aggregator) 
Histogram() (aggregation.Buckets, error) { // the empty set. Since no locks are taken, there is a chance that // the independent Sum, Count and Bucket Count are not consistent with each // other. -func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) diff --git a/sdk/metric/aggregator/histogram/histogram_test.go b/sdk/metric/aggregator/histogram/histogram_test.go index c3044815f7a..6c77c5428fd 100644 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ b/sdk/metric/aggregator/histogram/histogram_test.go @@ -121,7 +121,7 @@ func testHistogram(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - _ = agg.Checkpoint(ckpt, descriptor) + _ = agg.SynchronizedCopy(ckpt, descriptor) checkZero(t, agg, descriptor) @@ -184,8 +184,8 @@ func TestHistogramMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - _ = agg1.Checkpoint(ckpt1, descriptor) - _ = agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.SynchronizedCopy(ckpt1, descriptor) + _ = agg2.SynchronizedCopy(ckpt2, descriptor) test.CheckedMerge(t, ckpt1, ckpt2, descriptor) @@ -223,7 +223,7 @@ func TestHistogramNotSet(t *testing.T) { agg, ckpt := new2(descriptor) - err := agg.Checkpoint(ckpt, descriptor) + err := agg.SynchronizedCopy(ckpt, descriptor) require.NoError(t, err) checkZero(t, agg, descriptor) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue.go b/sdk/metric/aggregator/lastvalue/lastvalue.go index ff2f24c89fa..fe02cd65199 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue.go @@ -86,7 +86,7 @@ func (g *Aggregator) LastValue() (metric.Number, time.Time, error) { } // Checkpoint atomically saves the current value. 
-func (g *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) error { +func (g *Aggregator) SynchronizedCopy(oa export.Aggregator, _ *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(g, oa) diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go index 0eadf224f0b..dd55705e869 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue_test.go @@ -80,7 +80,7 @@ func TestLastValueUpdate(t *testing.T) { test.CheckedUpdate(t, agg, x, record) } - err := agg.Checkpoint(ckpt, record) + err := agg.SynchronizedCopy(ckpt, record) require.NoError(t, err) lv, _, err := ckpt.LastValue() @@ -102,8 +102,8 @@ func TestLastValueMerge(t *testing.T) { test.CheckedUpdate(t, agg1, first1, descriptor) test.CheckedUpdate(t, agg2, first2, descriptor) - _ = agg1.Checkpoint(ckpt1, descriptor) - _ = agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.SynchronizedCopy(ckpt1, descriptor) + _ = agg2.SynchronizedCopy(ckpt2, descriptor) checkZero(t, agg1) checkZero(t, agg2) @@ -127,7 +127,7 @@ func TestLastValueNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueObserverKind, metric.Int64NumberKind) g, ckpt := new2() - _ = g.Checkpoint(ckpt, descriptor) + _ = g.SynchronizedCopy(ckpt, descriptor) checkZero(t, g) } diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc.go b/sdk/metric/aggregator/minmaxsumcount/mmsc.go index a8a3082013b..6381c60b193 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc.go @@ -48,7 +48,7 @@ var _ aggregation.MinMaxSumCount = &Aggregator{} // count. It does not compute quantile information other than Min and // Max. // -// This type uses a mutex for Update() and Checkpoint() concurrency. +// This type uses a mutex for Update() and SynchronizedCopy() concurrency. 
func New(cnt int, desc *metric.Descriptor) []Aggregator { kind := desc.NumberKind() aggs := make([]Aggregator, cnt) @@ -98,7 +98,7 @@ func (c *Aggregator) Max() (metric.Number, error) { // Checkpoint saves the current state and resets the current state to // the empty set. -func (c *Aggregator) Checkpoint(oa export.Aggregator, desc *metric.Descriptor) error { +func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go index 55a3bc3d72c..0b25ccdb2ef 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go @@ -120,7 +120,7 @@ func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - _ = agg.Checkpoint(ckpt, descriptor) + _ = agg.SynchronizedCopy(ckpt, descriptor) checkZero(t, agg, descriptor) @@ -173,8 +173,8 @@ func TestMinMaxSumCountMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - _ = agg1.Checkpoint(ckpt1, descriptor) - _ = agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.SynchronizedCopy(ckpt1, descriptor) + _ = agg2.SynchronizedCopy(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) @@ -219,7 +219,7 @@ func TestMaxSumCountNotSet(t *testing.T) { alloc := New(2, descriptor) agg, ckpt := &alloc[0], &alloc[1] - _ = agg.Checkpoint(ckpt, descriptor) + _ = agg.SynchronizedCopy(ckpt, descriptor) asum, err := ckpt.Sum() require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") diff --git a/sdk/metric/aggregator/sum/sum.go b/sdk/metric/aggregator/sum/sum.go index 1bf13319f68..a5152f34205 100644 --- a/sdk/metric/aggregator/sum/sum.go +++ b/sdk/metric/aggregator/sum/sum.go @@ -53,7 +53,7 @@ func (c *Aggregator) Sum() (metric.Number, error) { // Checkpoint 
atomically saves the current value and resets the // current sum to zero. -func (c *Aggregator) Checkpoint(oa export.Aggregator, _ *metric.Descriptor) error { +func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, _ *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { return aggregator.NewInconsistentAggregatorError(c, oa) diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index b7bd6e655e5..80dd0b72f73 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -74,7 +74,7 @@ func TestCounterSum(t *testing.T) { test.CheckedUpdate(t, agg, x, descriptor) } - err := agg.Checkpoint(ckpt, descriptor) + err := agg.SynchronizedCopy(ckpt, descriptor) require.NoError(t, err) checkZero(t, agg, descriptor) @@ -102,7 +102,7 @@ func TestValueRecorderSum(t *testing.T) { sum.AddNumber(profile.NumberKind, r2) } - _ = agg.Checkpoint(ckpt, descriptor) + _ = agg.SynchronizedCopy(ckpt, descriptor) checkZero(t, agg, descriptor) asum, err := ckpt.Sum() @@ -125,8 +125,8 @@ func TestCounterMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - _ = agg1.Checkpoint(ckpt1, descriptor) - _ = agg2.Checkpoint(ckpt2, descriptor) + _ = agg1.SynchronizedCopy(ckpt1, descriptor) + _ = agg2.SynchronizedCopy(ckpt2, descriptor) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) diff --git a/sdk/metric/histogram_stress_test.go b/sdk/metric/histogram_stress_test.go index 13f7625dd1e..01b1d258ae6 100644 --- a/sdk/metric/histogram_stress_test.go +++ b/sdk/metric/histogram_stress_test.go @@ -46,7 +46,7 @@ func TestStressInt64Histogram(t *testing.T) { startTime := time.Now() for time.Since(startTime) < time.Second { - _ = h.Checkpoint(ckpt, &desc) + _ = h.SynchronizedCopy(ckpt, &desc) b, _ := ckpt.Histogram() c, _ := ckpt.Count() diff --git a/sdk/metric/integrator/simple/simple_test.go b/sdk/metric/integrator/simple/simple_test.go index a66013b8ab4..0135e3fd0f4 100644 --- 
a/sdk/metric/integrator/simple/simple_test.go +++ b/sdk/metric/integrator/simple/simple_test.go @@ -189,9 +189,9 @@ func TestSimpleStateful(t *testing.T) { // Update and re-checkpoint the original record. _ = caggA.Update(ctx, metric.NewInt64Number(20), &CounterADesc) _ = caggB.Update(ctx, metric.NewInt64Number(20), &CounterBDesc) - err := caggA.Checkpoint(ckptA, &CounterADesc) + err := caggA.SynchronizedCopy(ckptA, &CounterADesc) require.NoError(t, err) - err = caggB.Checkpoint(ckptB, &CounterBDesc) + err = caggB.SynchronizedCopy(ckptB, &CounterBDesc) require.NoError(t, err) // As yet cagg has not been passed to Integrator.Process. Should diff --git a/sdk/metric/minmaxsumcount_stress_test.go b/sdk/metric/minmaxsumcount_stress_test.go index 3419808ebb3..50d8305059c 100644 --- a/sdk/metric/minmaxsumcount_stress_test.go +++ b/sdk/metric/minmaxsumcount_stress_test.go @@ -47,7 +47,7 @@ func TestStressInt64MinMaxSumCount(t *testing.T) { startTime := time.Now() for time.Since(startTime) < time.Second { - _ = mmsc.Checkpoint(ckpt, &desc) + _ = mmsc.SynchronizedCopy(ckpt, &desc) s, _ := ckpt.Sum() c, _ := ckpt.Count() diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 8bc6933a41e..3a26c8606c4 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -438,7 +438,7 @@ func (m *Accumulator) checkpointRecord(r *record) int { if r.current == nil { return 0 } - err := r.current.Checkpoint(r.checkpoint, &r.inst.descriptor) + err := r.current.SynchronizedCopy(r.checkpoint, &r.inst.descriptor) if err != nil { global.Handle(err) return 0 From e688d64cd360d3cca8d92fd15d69a5276b5f5187 Mon Sep 17 00:00:00 2001 From: Joshua MacDonald Date: Fri, 12 Jun 2020 23:51:56 -0700 Subject: [PATCH 18/21] Apply suggestions from code review Thank you for helping improve the tests. 
:star: Co-authored-by: Tyler Yahn --- exporters/metric/stdout/stdout_test.go | 16 ++++++++-------- exporters/metric/test/test.go | 4 ++-- exporters/otlp/internal/transform/metric_test.go | 10 +++++----- exporters/otlp/otlp_metric_test.go | 2 +- sdk/metric/aggregator/array/array.go | 2 +- sdk/metric/aggregator/array/array_test.go | 8 ++++---- sdk/metric/aggregator/ddsketch/ddsketch.go | 2 +- sdk/metric/aggregator/ddsketch/ddsketch_test.go | 4 ++-- .../aggregator/histogram/histogram_test.go | 6 +++--- sdk/metric/aggregator/lastvalue/lastvalue.go | 2 +- .../aggregator/lastvalue/lastvalue_test.go | 6 +++--- sdk/metric/aggregator/minmaxsumcount/mmsc.go | 2 +- .../aggregator/minmaxsumcount/mmsc_test.go | 8 ++++---- sdk/metric/aggregator/sum/sum.go | 2 +- sdk/metric/aggregator/sum/sum_test.go | 6 +++--- sdk/metric/histogram_stress_test.go | 2 +- 16 files changed, 41 insertions(+), 41 deletions(-) diff --git a/exporters/metric/stdout/stdout_test.go b/exporters/metric/stdout/stdout_test.go index e1556c8ca75..2208bee02b7 100644 --- a/exporters/metric/stdout/stdout_test.go +++ b/exporters/metric/stdout/stdout_test.go @@ -104,7 +104,7 @@ func TestStdoutTimestamp(t *testing.T) { lvagg, ckpt := test.Unslice2(lastvalue.New(2)) aggtest.CheckedUpdate(t, lvagg, metric.NewInt64Number(321), &desc) - _ = lvagg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, lvagg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt) @@ -151,7 +151,7 @@ func TestStdoutCounterFormat(t *testing.T) { cagg, ckpt := test.Unslice2(sum.New(2)) aggtest.CheckedUpdate(fix.t, cagg, metric.NewInt64Number(123), &desc) - _ = cagg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, cagg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -169,7 +169,7 @@ func TestStdoutLastValueFormat(t *testing.T) { lvagg, ckpt := test.Unslice2(lastvalue.New(2)) aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc) - _ = 
lvagg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, lvagg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -189,7 +189,7 @@ func TestStdoutMinMaxSumCount(t *testing.T) { aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(123.456), &desc) aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(876.543), &desc) - _ = magg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, magg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -212,7 +212,7 @@ func TestStdoutValueRecorderFormat(t *testing.T) { aggtest.CheckedUpdate(fix.t, aagg, metric.NewFloat64Number(float64(i)+0.5), &desc) } - _ = aagg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, aagg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt, kv.String("A", "B"), kv.String("C", "D")) @@ -256,7 +256,7 @@ func TestStdoutNoData(t *testing.T) { checkpointSet := test.NewCheckpointSet(testResource) - _ = agg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, agg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt) @@ -278,7 +278,7 @@ func TestStdoutLastValueNotSet(t *testing.T) { desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind) lvagg, ckpt := test.Unslice2(lastvalue.New(2)) - _ = lvagg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, lvagg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, lvagg, kv.String("A", "B"), kv.String("C", "D")) @@ -330,7 +330,7 @@ func TestStdoutResource(t *testing.T) { lvagg, ckpt := test.Unslice2(lastvalue.New(2)) aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc) - _ = lvagg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, lvagg.SynchronizedCopy(ckpt, &desc)) checkpointSet.Add(&desc, ckpt, tc.attrs...) 
diff --git a/exporters/metric/test/test.go b/exporters/metric/test/test.go index d196e511786..f0d2126a6d4 100644 --- a/exporters/metric/test/test.go +++ b/exporters/metric/test/test.go @@ -51,7 +51,7 @@ func (*NoopAggregator) Update(context.Context, metric.Number, *metric.Descriptor return nil } -// Checkpoint implements export.Aggregator. +// SynchronizedCopy implements export.Aggregator. func (*NoopAggregator) SynchronizedCopy(export.Aggregator, *metric.Descriptor) error { return nil } @@ -113,7 +113,7 @@ func Unslice2(sl interface{}) (one, two export.Aggregator) { panic("Invalid Unslice2") } if slv.Len() != 2 { - panic("Invalid Unslice2") + panic("Invalid Unslice2: length > 2") } one = slv.Index(0).Addr().Interface().(export.Aggregator) two = slv.Index(1).Addr().Interface().(export.Aggregator) diff --git a/exporters/otlp/internal/transform/metric_test.go b/exporters/otlp/internal/transform/metric_test.go index 02d275d2b5b..d9d3f774409 100644 --- a/exporters/otlp/internal/transform/metric_test.go +++ b/exporters/otlp/internal/transform/metric_test.go @@ -91,7 +91,7 @@ func TestMinMaxSumCountValue(t *testing.T) { assert.EqualError(t, err, aggregation.ErrNoData.Error()) // Checkpoint to set non-zero values - _ = mmsc.SynchronizedCopy(ckpt, &metric.Descriptor{}) + require.NoError(t, mmsc.SynchronizedCopy(ckpt, &metric.Descriptor{})) min, max, sum, count, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount)) if assert.NoError(t, err) { assert.Equal(t, min, metric.NewInt64Number(1)) @@ -148,7 +148,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) { if !assert.NoError(t, mmsc.Update(ctx, 1, &metric.Descriptor{})) { return } - _ = mmsc.SynchronizedCopy(ckpt, &metric.Descriptor{}) + require.NoError(t, mmsc.SynchronizedCopy(ckpt, &metric.Descriptor{})) for _, test := range tests { desc := metric.NewDescriptor(test.name, test.metricKind, test.numberKind, metric.WithDescription(test.description), @@ -168,7 +168,7 @@ func TestMinMaxSumCountDatapoints(t 
*testing.T) { assert.NoError(t, mmsc.Update(context.Background(), 1, &desc)) assert.NoError(t, mmsc.Update(context.Background(), 10, &desc)) - _ = mmsc.SynchronizedCopy(ckpt, &desc) + require.NoError(t, mmsc.SynchronizedCopy(ckpt, &desc)) expected := []*metricpb.SummaryDataPoint{ { Count: 2, @@ -264,7 +264,7 @@ func TestSumInt64DataPoints(t *testing.T) { labels := label.NewSet() s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.Number(1), &desc)) - _ = s.SynchronizedCopy(ckpt, &desc) + require.NoError(t, s.SynchronizedCopy(ckpt, &desc)) if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint{{Value: 1}}, m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint(nil), m.DoubleDataPoints) @@ -278,7 +278,7 @@ func TestSumFloat64DataPoints(t *testing.T) { labels := label.NewSet() s, ckpt := test.Unslice2(sumAgg.New(2)) assert.NoError(t, s.Update(context.Background(), metric.NewFloat64Number(1), &desc)) - _ = s.SynchronizedCopy(ckpt, &desc) + require.NoError(t, s.SynchronizedCopy(ckpt, &desc)) if m, err := sum(&desc, &labels, ckpt.(aggregation.Sum)); assert.NoError(t, err) { assert.Equal(t, []*metricpb.Int64DataPoint(nil), m.Int64DataPoints) assert.Equal(t, []*metricpb.DoubleDataPoint{{Value: 1}}, m.DoubleDataPoints) diff --git a/exporters/otlp/otlp_metric_test.go b/exporters/otlp/otlp_metric_test.go index b1b873ef9b0..4c0d11d4d17 100644 --- a/exporters/otlp/otlp_metric_test.go +++ b/exporters/otlp/otlp_metric_test.go @@ -658,7 +658,7 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me default: t.Fatalf("invalid number kind: %v", r.nKind) } - _ = agg.SynchronizedCopy(ckpt, &desc) + require.NoError(t, agg.SynchronizedCopy(ckpt, &desc)) equiv := r.resource.Equivalent() resources[equiv] = r.resource diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 534aae7a7da..4270bf31c61 
100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -87,7 +87,7 @@ func (c *Aggregator) Points() ([]metric.Number, error) { return c.points, nil } -// Checkpoint saves the current state and resets the current state to +// SynchronizedCopy saves the current state to oa and resets the current state to // the empty set, taking a lock to prevent concurrent Update() calls. func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 2f94d34db15..234ea24f0e5 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -154,8 +154,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - _ = agg1.SynchronizedCopy(ckpt1, descriptor) - _ = agg2.SynchronizedCopy(ckpt2, descriptor) + require.NoError(t, agg1.SynchronizedCopy(ckpt1, descriptor)) + require.NoError(t, agg2.SynchronizedCopy(ckpt2, descriptor)) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) @@ -232,7 +232,7 @@ func TestArrayErrors(t *testing.T) { if profile.NumberKind == metric.Float64NumberKind { test.CheckedUpdate(t, agg, metric.NewFloat64Number(math.NaN()), descriptor) } - _ = agg.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, agg.SynchronizedCopy(ckpt, descriptor)) count, err := ckpt.Count() require.Equal(t, int64(1), count, "NaN value was not counted") @@ -297,7 +297,7 @@ func TestArrayFloat64(t *testing.T) { test.CheckedUpdate(t, agg, metric.NewFloat64Number(f), descriptor) } - _ = agg.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, agg.SynchronizedCopy(ckpt, descriptor)) all.Sort() diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 5e737444745..e3604890d72 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go 
@@ -104,7 +104,7 @@ func (c *Aggregator) toNumber(f float64) metric.Number { return metric.NewInt64Number(int64(f)) } -// Checkpoint saves the current state and resets the current state to +// SynchronizedCopy saves the current state into oa and resets the current state to // a new sketch, taking a lock to prevent concurrent Update() calls. func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, _ *metric.Descriptor) error { o, _ := oa.(*Aggregator) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 6293817c00b..1fdbde3679b 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -156,8 +156,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - _ = agg1.SynchronizedCopy(ckpt1, descriptor) - _ = agg2.SynchronizedCopy(ckpt2, descriptor) + require.NoError(t, agg1.SynchronizedCopy(ckpt1, descriptor)) + require.NoError(t, agg2.SynchronizedCopy(ckpt2, descriptor)) checkZero(t, agg1, descriptor) checkZero(t, agg1, descriptor) diff --git a/sdk/metric/aggregator/histogram/histogram_test.go b/sdk/metric/aggregator/histogram/histogram_test.go index 6c77c5428fd..b25c6421397 100644 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ b/sdk/metric/aggregator/histogram/histogram_test.go @@ -121,7 +121,7 @@ func testHistogram(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - _ = agg.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, agg.SynchronizedCopy(ckpt, descriptor)) checkZero(t, agg, descriptor) @@ -184,8 +184,8 @@ func TestHistogramMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - _ = agg1.SynchronizedCopy(ckpt1, descriptor) - _ = agg2.SynchronizedCopy(ckpt2, descriptor) + require.NoError(t, agg1.SynchronizedCopy(ckpt1, descriptor)) + require.NoError(t, agg2.SynchronizedCopy(ckpt2, descriptor)) test.CheckedMerge(t, ckpt1, ckpt2, descriptor) diff 
--git a/sdk/metric/aggregator/lastvalue/lastvalue.go b/sdk/metric/aggregator/lastvalue/lastvalue.go index fe02cd65199..64090427b93 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue.go @@ -85,7 +85,7 @@ func (g *Aggregator) LastValue() (metric.Number, time.Time, error) { return gd.value.AsNumber(), gd.timestamp, nil } -// Checkpoint atomically saves the current value. +// SynchronizedCopy atomically saves the current value. func (g *Aggregator) SynchronizedCopy(oa export.Aggregator, _ *metric.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go index dd55705e869..c9c12cf4f20 100644 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ b/sdk/metric/aggregator/lastvalue/lastvalue_test.go @@ -102,8 +102,8 @@ func TestLastValueMerge(t *testing.T) { test.CheckedUpdate(t, agg1, first1, descriptor) test.CheckedUpdate(t, agg2, first2, descriptor) - _ = agg1.SynchronizedCopy(ckpt1, descriptor) - _ = agg2.SynchronizedCopy(ckpt2, descriptor) + require.NoError(t, agg1.SynchronizedCopy(ckpt1, descriptor)) + require.NoError(t, agg2.SynchronizedCopy(ckpt2, descriptor)) checkZero(t, agg1) checkZero(t, agg2) @@ -127,7 +127,7 @@ func TestLastValueNotSet(t *testing.T) { descriptor := test.NewAggregatorTest(metric.ValueObserverKind, metric.Int64NumberKind) g, ckpt := new2() - _ = g.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, g.SynchronizedCopy(ckpt, descriptor)) checkZero(t, g) } diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc.go b/sdk/metric/aggregator/minmaxsumcount/mmsc.go index 6381c60b193..560309038c8 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc.go @@ -96,7 +96,7 @@ func (c *Aggregator) Max() (metric.Number, error) { return c.max, nil } -// Checkpoint saves the current state and resets the current state to +// 
SynchronizedCopy saves the current state into oa and resets the current state to // the empty set. func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, desc *metric.Descriptor) error { o, _ := oa.(*Aggregator) diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go index 0b25ccdb2ef..9e193ce43d9 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc_test.go @@ -120,7 +120,7 @@ func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { test.CheckedUpdate(t, agg, x, descriptor) } - _ = agg.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, agg.SynchronizedCopy(ckpt, descriptor)) checkZero(t, agg, descriptor) @@ -173,8 +173,8 @@ func TestMinMaxSumCountMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - _ = agg1.SynchronizedCopy(ckpt1, descriptor) - _ = agg2.SynchronizedCopy(ckpt2, descriptor) + require.NoError(t, agg1.SynchronizedCopy(ckpt1, descriptor)) + require.NoError(t, agg2.SynchronizedCopy(ckpt2, descriptor)) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) @@ -219,7 +219,7 @@ func TestMaxSumCountNotSet(t *testing.T) { alloc := New(2, descriptor) agg, ckpt := &alloc[0], &alloc[1] - _ = agg.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, agg.SynchronizedCopy(ckpt, descriptor)) asum, err := ckpt.Sum() require.Equal(t, metric.Number(0), asum, "Empty checkpoint sum = 0") diff --git a/sdk/metric/aggregator/sum/sum.go b/sdk/metric/aggregator/sum/sum.go index a5152f34205..d402a378b66 100644 --- a/sdk/metric/aggregator/sum/sum.go +++ b/sdk/metric/aggregator/sum/sum.go @@ -51,7 +51,7 @@ func (c *Aggregator) Sum() (metric.Number, error) { return c.value, nil } -// Checkpoint atomically saves the current value and resets the +// SynchronizedCopy atomically saves the current value into oa and resets the // current sum to zero. 
func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, _ *metric.Descriptor) error { o, _ := oa.(*Aggregator) diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go index 80dd0b72f73..eeb6755c20d 100644 --- a/sdk/metric/aggregator/sum/sum_test.go +++ b/sdk/metric/aggregator/sum/sum_test.go @@ -102,7 +102,7 @@ func TestValueRecorderSum(t *testing.T) { sum.AddNumber(profile.NumberKind, r2) } - _ = agg.SynchronizedCopy(ckpt, descriptor) + require.NoError(t, agg.SynchronizedCopy(ckpt, descriptor)) checkZero(t, agg, descriptor) asum, err := ckpt.Sum() @@ -125,8 +125,8 @@ func TestCounterMerge(t *testing.T) { test.CheckedUpdate(t, agg2, x, descriptor) } - _ = agg1.SynchronizedCopy(ckpt1, descriptor) - _ = agg2.SynchronizedCopy(ckpt2, descriptor) + require.NoError(t, agg1.SynchronizedCopy(ckpt1, descriptor)) + require.NoError(t, agg2.SynchronizedCopy(ckpt2, descriptor)) checkZero(t, agg1, descriptor) checkZero(t, agg2, descriptor) diff --git a/sdk/metric/histogram_stress_test.go b/sdk/metric/histogram_stress_test.go index 01b1d258ae6..377079a209c 100644 --- a/sdk/metric/histogram_stress_test.go +++ b/sdk/metric/histogram_stress_test.go @@ -46,7 +46,7 @@ func TestStressInt64Histogram(t *testing.T) { startTime := time.Now() for time.Since(startTime) < time.Second { - _ = h.SynchronizedCopy(ckpt, &desc) + require.NoError(t, h.SynchronizedCopy(ckpt, &desc)) b, _ := ckpt.Histogram() c, _ := ckpt.Count() From 16ed1c1d63336152583e3e37333d2aba91ad5077 Mon Sep 17 00:00:00 2001 From: Joshua MacDonald Date: Sat, 13 Jun 2020 00:02:11 -0700 Subject: [PATCH 19/21] Update sdk/metric/aggregator/histogram/histogram.go Co-authored-by: Tyler Yahn --- sdk/metric/aggregator/histogram/histogram.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/metric/aggregator/histogram/histogram.go b/sdk/metric/aggregator/histogram/histogram.go index 0a9697426d8..91fe17647c2 100644 --- a/sdk/metric/aggregator/histogram/histogram.go +++ 
b/sdk/metric/aggregator/histogram/histogram.go @@ -106,7 +106,7 @@ func (c *Aggregator) Histogram() (aggregation.Buckets, error) { }, nil } -// Checkpoint saves the current state and resets the current state to +// SynchronizedCopy saves the current state into oa and resets the current state to // the empty set. Since no locks are taken, there is a chance that // the independent Sum, Count and Bucket Count are not consistent with each // other. From 5a7a1b5f881d635c04e6dc6ef275ee04e25c2300 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 13 Jun 2020 00:45:45 -0700 Subject: [PATCH 20/21] Updates --- .../otlp/internal/transform/metric_test.go | 1 + sdk/export/metric/metric.go | 14 +++++++++----- sdk/metric/aggregator/minmaxsumcount/mmsc.go | 5 ++++- sdk/metric/aggregator/test/test.go | 18 ++++++++++++++++++ sdk/metric/histogram_stress_test.go | 2 ++ 5 files changed, 34 insertions(+), 6 deletions(-) diff --git a/exporters/otlp/internal/transform/metric_test.go b/exporters/otlp/internal/transform/metric_test.go index d9d3f774409..fcc8b6a22c8 100644 --- a/exporters/otlp/internal/transform/metric_test.go +++ b/exporters/otlp/internal/transform/metric_test.go @@ -22,6 +22,7 @@ import ( commonpb "github.com/open-telemetry/opentelemetry-proto/gen/go/common/v1" metricpb "github.com/open-telemetry/opentelemetry-proto/gen/go/metrics/v1" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/api/kv" "go.opentelemetry.io/otel/api/label" diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 6488b7391d9..937676b8d69 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -116,9 +116,9 @@ type Aggregator interface { // inspected for a `correlation.Map` or `trace.SpanContext`. 
Update(context.Context, metric.Number, *metric.Descriptor) error - // SynchronizedCopy is called during collection to finish one period - // of aggregation by atomically saving the current state into - // the argument Aggregator. + // SynchronizedCopy is called during collection to finish one + // period of aggregation by atomically saving the + // currently-updating state into the argument Aggregator. // // SynchronizedCopy() is called concurrently with Update(). These // two methods must be synchronized with respect to each @@ -128,14 +128,18 @@ type Aggregator interface { // into one or more of the interfaces in the `aggregation` sub-package, // according to kind of Aggregator that was selected. // + // This method will return an InconsistentAggregatorError if + // this Aggregator cannot be copied into the destination due + // to an incompatible type. + // // This call has no Context argument because it is expected to // perform only computation. - SynchronizedCopy(Aggregator, *metric.Descriptor) error + SynchronizedCopy(destination Aggregator, descriptor *metric.Descriptor) error // Merge combines the checkpointed state from the argument // Aggregator into this Aggregator. Merge is not synchronized // with respect to Update, and should only be called on - // checkpoiunted Aggregators. + // checkpointed Aggregators. // // The owner of a checkpointed // Aggregator is responsible for synchronization. 
diff --git a/sdk/metric/aggregator/minmaxsumcount/mmsc.go b/sdk/metric/aggregator/minmaxsumcount/mmsc.go index 560309038c8..7e97013f121 100644 --- a/sdk/metric/aggregator/minmaxsumcount/mmsc.go +++ b/sdk/metric/aggregator/minmaxsumcount/mmsc.go @@ -34,10 +34,10 @@ type ( } state struct { - count int64 sum metric.Number min metric.Number max metric.Number + count int64 } ) @@ -104,6 +104,9 @@ func (c *Aggregator) SynchronizedCopy(oa export.Aggregator, desc *metric.Descrip return aggregator.NewInconsistentAggregatorError(c, oa) } + // TODO: It is incorrect to use an Aggregator of different + // kind. Should we test that o.kind == c.kind? (The same question + // occurs for several of the other aggregators in ../*.) c.lock.Lock() o.state, c.state = c.state, emptyState(c.kind) c.lock.Unlock() diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 062c6b3e29b..3e2cb8094b7 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -17,10 +17,13 @@ package test import ( "context" "math/rand" + "os" "sort" "testing" + "unsafe" "go.opentelemetry.io/otel/api/metric" + ottest "go.opentelemetry.io/otel/internal/testing" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/metric/aggregator" ) @@ -63,6 +66,21 @@ func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { } } +// Ensure local struct alignment prior to running tests. 
+func TestMain(m *testing.M) { + fields := []ottest.FieldOffset{ + { + Name: "Numbers.numbers", + Offset: unsafe.Offsetof(Numbers{}.numbers), + }, + } + if !ottest.Aligned8Byte(fields, os.Stderr) { + os.Exit(1) + } + + os.Exit(m.Run()) +} + // TODO: Expose Numbers in api/metric for sorting support type Numbers struct { diff --git a/sdk/metric/histogram_stress_test.go b/sdk/metric/histogram_stress_test.go index 377079a209c..4f0bced3345 100644 --- a/sdk/metric/histogram_stress_test.go +++ b/sdk/metric/histogram_stress_test.go @@ -20,6 +20,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/metric" "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" ) From f892ef8c6803f01417af642cc5f0939b0f5448e1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 13 Jun 2020 00:48:56 -0700 Subject: [PATCH 21/21] More comments --- sdk/export/metric/metric.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 937676b8d69..a65e63c53eb 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -138,11 +138,10 @@ type Aggregator interface { // Merge combines the checkpointed state from the argument // Aggregator into this Aggregator. Merge is not synchronized - // with respect to Update, and should only be called on - // checkpointed Aggregators. + // with respect to Update or SynchronizedCopy. // - // The owner of a checkpointed - // Aggregator is responsible for synchronization. + // The owner of an Aggregator being merged is responsible for + // synchronization of both Aggregator states. Merge(Aggregator, *metric.Descriptor) error }