blob: 6b4ce924147e0b0b4484de90ed7ccffa5d23341f [file] [log] [blame]
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package catapult_test
import (
"reflect"
"testing"
. "fuchsia.googlesource.com/infra/infra/catapult"
schema "fuchsia.googlesource.com/infra/infra/perf/schema/v1"
"github.com/stretchr/testify/assert"
)
// TODO(kjharland): After writing this test the team decided we shouldn't use
// assertion libraries. Remove `assert.*` references in a later change.
// TODO(kjharland): Add tests to handle corner cases for running statistics
// computations.

// TestConvertVariantsToHistograms verifies BenchmarkData -> Histogram
// conversion: one histogram per labeled sample, a single merged histogram when
// every sample is unlabeled, and an error for empty sample values or for a mix
// of labeled and unlabeled samples. Input values are in ns; the converted
// histograms carry ms-scaled running statistics.
func TestConvertVariantsToHistograms(t *testing.T) {
	// Returns BenchmarkData for testing with the given sample values.
	//
	// TODO(kjharland): Inline this.
	createBenchmarkData := func(sampleValues []float64) schema.BenchmarkData {
		return schema.BenchmarkData{
			Label: "example_benchmark_data",
			Unit:  "ns",
			Samples: []schema.Sample{
				{
					Label:  "example_sample",
					Values: sampleValues,
				},
			},
		}
	}

	// Expects that the actual Histogram matches the expected Histogram.
	expectHistogram := func(actual *Histogram, expected *Histogram) {
		// Mark this as a helper so that assertion failures are attributed to
		// the calling subtest's line rather than to this function.
		t.Helper()
		assert.Equal(t, expected.Name, actual.Name)
		assert.Equal(t, expected.Unit, actual.Unit)
		assert.Equal(t, expected.MaxNumSampleValues, actual.MaxNumSampleValues)
		assert.Equal(t, expected.NumNans, actual.NumNans)
		assert.Equal(t, expected.Description, actual.Description)
		assert.Equal(t, expected.Diagnostics, actual.Diagnostics)
		assert.ElementsMatch(t, expected.Running, actual.Running)
		// GUIDs are generated per-histogram, so only check the length
		// (36 characters, the canonical textual UUID form).
		assert.Len(t, actual.GUID, 36)
	}

	t.Run("with no sample values", func(t *testing.T) {
		if _, err := ConvertBenchmarkDataToHistograms(
			createBenchmarkData([]float64{})); err == nil {
			t.Errorf("expected an error")
		}
	})

	t.Run("with one sample value", func(t *testing.T) {
		histograms, err := ConvertBenchmarkDataToHistograms(
			createBenchmarkData([]float64{5}))
		if err != nil {
			t.Fatal("failed to create histogram", err)
		}
		// testify's assert.Equal takes (t, expected, actual); the expected
		// count goes first so failure messages read correctly.
		assert.Equal(t, 1, len(histograms))
		expectHistogram(&histograms[0], &Histogram{
			Name:               "example_benchmark_data_example_sample",
			Unit:               "ms_smallerIsBetter",
			MaxNumSampleValues: 1,
			NumNans:            0,
			Running: []float64{
				1,                  // count
				5e-06,              // max
				-5.301029995663981, // meanlogs
				5e-06,              // mean
				5e-06,              // min
				5e-06,              // sum
				0,                  // variance
			},
		})
	})

	t.Run("with two sample values", func(t *testing.T) {
		histograms, err := ConvertBenchmarkDataToHistograms(
			createBenchmarkData([]float64{7, 11}))
		if err != nil {
			t.Fatal("failed to create histogram", err)
		}
		assert.Equal(t, 1, len(histograms))
		expectHistogram(&histograms[0], &Histogram{
			// Name should be BenchmarkData.Label_Sample.Label. The example
			// data is BenchmarkData.Label == "example_benchmark_data" and
			// Sample.Label == "example_sample".
			Name:               "example_benchmark_data_example_sample",
			Unit:               "ms_smallerIsBetter",
			MaxNumSampleValues: 2,
			NumNans:            0,
			Running: []float64{
				2,                  // count
				11e-6,              // max
				-5.056754637413759, // meanlogs
				9e-6,               // mean
				7e-6,               // min
				1.8e-5,             // sum
				8e-12,              // variance
			},
		})
	})

	t.Run("with greater than two sample values", func(t *testing.T) {
		histograms, err := ConvertBenchmarkDataToHistograms(
			createBenchmarkData([]float64{10, 20, 30, 40, 50}))
		if err != nil {
			t.Fatal("failed to create histogram", err)
		}
		assert.Equal(t, 1, len(histograms))
		expectHistogram(&histograms[0], &Histogram{
			Name:               "example_benchmark_data_example_sample",
			Unit:               "ms_smallerIsBetter",
			MaxNumSampleValues: 5,
			NumNans:            0,
			Running: []float64{
				5,                      // count
				5e-05,                  // max
				-4.5841637507904744,    // meanlogs
				3.0000000000000004e-05, // mean
				1e-5,                   // min
				1.5000000000000001e-4,  // sum
				2.5e-10,                // variance
			},
		})
	})

	t.Run("when some samples have labels and others don't", func(t *testing.T) {
		_, err := ConvertBenchmarkDataToHistograms(schema.BenchmarkData{
			Label: "example_benchmark_data",
			Unit:  "ns",
			Samples: []schema.Sample{{
				Label:  "example_sample",
				Values: []float64{1, 2, 3},
			}, {
				// No label
				Values: []float64{4, 5, 6},
			}},
		})
		if err == nil {
			t.Error("expected an error but got nil")
		}
	})

	t.Run("when all samples are labeled", func(t *testing.T) {
		histograms, err := ConvertBenchmarkDataToHistograms(schema.BenchmarkData{
			Label: "example_benchmark_data",
			Unit:  "ns",
			Samples: []schema.Sample{{
				Label:  "example_sample_a",
				Values: []float64{10, 20, 30, 40, 50},
			}, {
				Label:  "example_sample_b",
				Values: []float64{10, 20, 30, 40, 50},
			}},
		})
		if err != nil {
			t.Fatal("failed to create histogram", err)
		}
		if len(histograms) != 2 {
			t.Fatalf("should have created two histograms, instead got %v", histograms)
		}
		expectHistogram(&histograms[0], &Histogram{
			Name:               "example_benchmark_data_example_sample_a",
			Unit:               "ms_smallerIsBetter",
			MaxNumSampleValues: 5,
			NumNans:            0,
			Running: []float64{
				5,                      // count
				5e-05,                  // max
				-4.5841637507904744,    // meanlogs
				3.0000000000000004e-05, // mean
				1e-5,                   // min
				1.5000000000000001e-4,  // sum
				2.5e-10,                // variance
			},
		})
		expectHistogram(&histograms[1], &Histogram{
			Name:               "example_benchmark_data_example_sample_b",
			Unit:               "ms_smallerIsBetter",
			MaxNumSampleValues: 5,
			NumNans:            0,
			Running: []float64{
				5,                      // count
				5e-05,                  // max
				-4.5841637507904744,    // meanlogs
				3.0000000000000004e-05, // mean
				1e-5,                   // min
				1.5000000000000001e-4,  // sum
				2.5e-10,                // variance
			},
		})
	})

	t.Run("when all samples are unlabeled", func(t *testing.T) {
		histograms, err := ConvertBenchmarkDataToHistograms(schema.BenchmarkData{
			Label: "example_benchmark_data",
			Unit:  "ns",
			Samples: []schema.Sample{{
				// no labels
				Values: []float64{5, 10, 15, 20, 25},
			}, {
				Values: []float64{5, 10, 15, 20, 25},
			}},
		})
		if err != nil {
			t.Fatal("failed to create histogram", err)
		}
		if len(histograms) != 1 {
			t.Fatalf("should have created one histogram, instead got %v", histograms)
		}
		// Unlabeled samples are merged: stats below cover all 10 values.
		expectHistogram(&histograms[0], &Histogram{
			Name:               "example_benchmark_data",
			Unit:               "ms_smallerIsBetter",
			MaxNumSampleValues: 10,
			NumNans:            0,
			Running: []float64{
				10,                     // count
				2.5e-5,                 // max
				-4.885193746454457,     // meanlogs
				1.5000000000000002e-05, // mean
				5e-06,                  // min
				0.00015000000000000001, // sum
				5.555555555555557e-11,  // variance
			},
		})
	})
}
// TestHistogram_AddDiagnostic verifies the insert and overwrite semantics of
// Histogram.AddDiagnostic on the Diagnostics map.
func TestHistogram_AddDiagnostic(t *testing.T) {
	const (
		testName = "test-name"
		testGUID = "test-guid"
	)

	// seeded returns a fresh Histogram that already holds the
	// testName -> testGUID diagnostic pair, so each subtest starts
	// from identical, independent state.
	seeded := func() Histogram {
		var h Histogram
		h.AddDiagnostic(testName, testGUID)
		return h
	}

	t.Run("should do nothing if adding an identical name-guid pair", func(t *testing.T) {
		h := seeded()
		h.AddDiagnostic(testName, testGUID)
		want := map[string]string{testName: testGUID}
		if !reflect.DeepEqual(want, h.Diagnostics) {
			t.Errorf("invalid diagnostics map. Expected %v. Got %v",
				want, h.Diagnostics)
		}
	})

	t.Run("should add a new name-guid pair when given a unique name.", func(t *testing.T) {
		h := seeded()
		h.AddDiagnostic("different-name", testGUID)
		want := map[string]string{
			testName:         testGUID,
			"different-name": testGUID,
		}
		if !reflect.DeepEqual(want, h.Diagnostics) {
			t.Errorf("invalid diagnostics map. Expected %v. Got %v",
				want, h.Diagnostics)
		}
	})

	t.Run("should overwrite an existing name-guid pair", func(t *testing.T) {
		h := seeded()
		h.AddDiagnostic(testName, "different-guid")
		want := map[string]string{testName: "different-guid"}
		if !reflect.DeepEqual(want, h.Diagnostics) {
			t.Errorf("Diagnostics map was modified. Expected %v. Got %v",
				want, h.Diagnostics)
		}
	})
}