Fix: do not return an error when the metrics data points are empty #3663

Merged 4 commits on May 25, 2022
pkg/app/piped/analysisprovider/metrics/datadog/datadog.go (0 additions & 3 deletions)
@@ -129,9 +129,6 @@ func (p *Provider) QueryPoints(ctx context.Context, query string, queryRange met
 	if httpResp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("unexpected HTTP status code from %s: %d", httpResp.Request.URL, httpResp.StatusCode)
 	}
-	if resp.Series == nil || len(*resp.Series) == 0 {
-		return nil, fmt.Errorf("no query metadata found: %w", metrics.ErrNoDataFound)
-	}
 
 	// Collect data points given by the provider.
 	var size int
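With the hunk above removed, QueryPoints no longer fails when the Datadog response carries no series: execution falls through to the collection loop and returns an empty slice with a nil error. A minimal sketch of the resulting caller-side contract (the DataPoint type and the series parameter here are simplified stand-ins for illustration, not pipecd's real API):

package main

import (
	"context"
	"fmt"
)

// DataPoint is a simplified stand-in for pipecd's metrics.DataPoint.
type DataPoint struct {
	Timestamp int64
	Value     float64
}

// queryPoints mimics the post-change contract of QueryPoints: when the
// provider's response contains no series, it returns an empty slice and a
// nil error instead of wrapping metrics.ErrNoDataFound.
func queryPoints(ctx context.Context, series [][]float64) ([]DataPoint, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // genuine failures still surface as errors
	}
	var points []DataPoint
	for _, s := range series {
		for _, v := range s {
			points = append(points, DataPoint{Value: v})
		}
	}
	return points, nil // possibly empty; the caller decides what that means
}

func main() {
	points, err := queryPoints(context.Background(), nil)
	fmt.Println(len(points), err) // 0 <nil>: "no data" is no longer an error
}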
pkg/app/piped/executor/analysis/metrics_analyzer.go (13 additions & 4 deletions)
@@ -136,6 +136,10 @@ func (a *metricsAnalyzer) analyzeWithThreshold(ctx context.Context) (bool, error
 	if err != nil {
 		return false, fmt.Errorf("failed to run query: %w", err)
 	}
+	if len(points) == 0 {
+		a.logPersister.Infof("[%s] This analysis stage will be skipped since there was no data point to compare", a.id)
+		return true, nil
+	}
 
 	var outiler metrics.DataPoint
 	expected := true
@@ -200,7 +204,7 @@ func (a *metricsAnalyzer) analyzeWithPrevious(ctx context.Context) (expected, fi
 	for i := range prevPoints {
 		prevValues = append(prevValues, prevPoints[i].Value)
 	}
-	expected, err = compare(values, prevValues, a.cfg.Deviation)
+	expected, err = a.compare(values, prevValues, a.cfg.Deviation)
 	if err != nil {
 		a.logPersister.Errorf("[%s] Failed to compare data points: %v", a.id, err)
 		a.logPersister.Infof("[%s] Performed query: %q", a.id, a.cfg.Query)
@@ -265,7 +269,7 @@ func (a *metricsAnalyzer) analyzeWithCanaryBaseline(ctx context.Context) (bool,
 		baselineValues = append(baselineValues, baselinePoints[i].Value)
 	}
 
-	expected, err := compare(canaryValues, baselineValues, a.cfg.Deviation)
+	expected, err := a.compare(canaryValues, baselineValues, a.cfg.Deviation)
 	if err != nil {
 		a.logPersister.Errorf("[%s] Failed to compare data points: %v", a.id, err)
 		a.logPersister.Infof("[%s] Performed query for Canary: %q", a.id, canaryQuery)
@@ -327,7 +331,7 @@ func (a *metricsAnalyzer) analyzeWithCanaryPrimary(ctx context.Context) (bool, e
 	for i := range primaryPoints {
 		primaryValues = append(primaryValues, primaryPoints[i].Value)
 	}
-	expected, err := compare(canaryValues, primaryValues, a.cfg.Deviation)
+	expected, err := a.compare(canaryValues, primaryValues, a.cfg.Deviation)
 	if err != nil {
 		a.logPersister.Errorf("[%s] Failed to compare data points: %v", a.id, err)
 		a.logPersister.Infof("[%s] Performed query for Canary: %q", a.id, canaryQuery)
@@ -354,7 +358,12 @@ func (a *metricsAnalyzer) analyzeWithCanaryPrimary(ctx context.Context) (bool, e
 
 // compare compares the given two samples using Mann-Whitney U test.
 // Considered as failure if it deviates in the specified direction as the third argument.
-func compare(experiment, control []float64, deviation string) (acceptable bool, err error) {
+// If both of the given samples are empty, this returns true.
+func (a *metricsAnalyzer) compare(experiment, control []float64, deviation string) (acceptable bool, err error) {
+	if len(experiment) == 0 && len(control) == 0 {
+		a.logPersister.Infof("[%s] The analysis stage will be skipped since there was no data point to compare", a.id)
+		return true, nil
+	}
 	if len(experiment) == 0 {
 		return false, fmt.Errorf("no data points of Experiment found")
 	}
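The ordering of the guards in the new compare is what makes the fix safe: the both-empty check must run before the one-sided checks, otherwise an empty pair would be misreported as a missing-Experiment error. A self-contained sketch of that ordering (the analyzer struct is pared down, and the Control-side check is assumed symmetric to the Experiment one visible in the diff):

package main

import (
	"fmt"
	"log"
)

// analyzer is a pared-down stand-in for metricsAnalyzer; only the field the
// guards need is modeled here (an assumption for this sketch).
type analyzer struct {
	id string
}

// compareGuards mirrors the guard ordering of the new compare method.
func (a *analyzer) compareGuards(experiment, control []float64) (bool, error) {
	// Both sides empty: skip the stage instead of failing it. This check
	// must run before the one-sided checks below, or an empty pair would
	// be reported as a missing-Experiment error.
	if len(experiment) == 0 && len(control) == 0 {
		log.Printf("[%s] skipped: no data point to compare", a.id)
		return true, nil
	}
	if len(experiment) == 0 {
		return false, fmt.Errorf("no data points of Experiment found")
	}
	// The Control-side check is assumed symmetric; it is not shown in the
	// diff above.
	if len(control) == 0 {
		return false, fmt.Errorf("no data points of Control found")
	}
	// The real method would now run the Mann-Whitney U test and judge the
	// result against the configured deviation direction (e.g. EITHER).
	return true, nil
}

func main() {
	a := &analyzer{id: "stage-1"}
	ok, err := a.compareGuards(nil, nil)
	fmt.Println(ok, err) // true <nil>: the stage is skipped, not failed
}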
pkg/app/piped/executor/analysis/metrics_analyzer_test.go (97 additions & 6 deletions)
@@ -138,7 +138,7 @@ func Test_metricsAnalyzer_analyzeWithThreshold(t *testing.T) {
 	}
 }
 
-func Test_compare(t *testing.T) {
+func Test_metricsAnalyzer_compare(t *testing.T) {
 	t.Parallel()
 
 	type args struct {
@@ -147,13 +147,24 @@ func Test_compare(t *testing.T) {
 		deviation  string
 	}
 	testcases := []struct {
-		name         string
-		args         args
-		wantExpected bool
-		wantErr      bool
+		name            string
+		metricsAnalyzer *metricsAnalyzer
+		args            args
+		wantExpected    bool
+		wantErr         bool
 	}{
 		{
 			name: "empty data points given",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{},
 				control:    []float64{0.1, 0.2, 0.3, 0.4, 0.5},
@@ -164,6 +175,16 @@ func Test_compare(t *testing.T) {
 		},
 		{
 			name: "no significance",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{0.1, 0.2, 0.3, 0.4, 0.5},
 				control:    []float64{0.1, 0.2, 0.3, 0.4, 0.5},
@@ -174,6 +195,16 @@ func Test_compare(t *testing.T) {
 		},
 		{
 			name: "deviation on high direction as expected",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{10.1, 10.2, 10.3, 10.4, 10.5},
 				control:    []float64{0.1, 0.2, 0.3, 0.4, 0.5},
@@ -184,6 +215,16 @@ func Test_compare(t *testing.T) {
 		},
 		{
 			name: "deviation on low direction as expected",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{0.1, 0.2, 0.3, 0.4, 0.5},
 				control:    []float64{10.1, 10.2, 10.3, 10.4, 10.5},
@@ -194,6 +235,16 @@ func Test_compare(t *testing.T) {
 		},
 		{
 			name: "deviation on high direction as unexpected",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{10.1, 10.2, 10.3, 10.4, 10.5},
 				control:    []float64{0.1, 0.2, 0.3, 0.4, 0.5},
@@ -204,6 +255,16 @@ func Test_compare(t *testing.T) {
 		},
 		{
 			name: "deviation on low direction as unexpected",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{0.1, 0.2, 0.3, 0.4, 0.5},
 				control:    []float64{10.1, 10.2, 10.3, 10.4, 10.5},
@@ -214,6 +275,16 @@ func Test_compare(t *testing.T) {
 		},
 		{
 			name: "deviation as unexpected",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
 			args: args{
 				experiment: []float64{0.1, 0.2, 5.3, 0.2, 0.5},
 				control:    []float64{0.1, 0.1, 0.1, 0.1, 0.1},
@@ -222,10 +293,30 @@ func Test_compare(t *testing.T) {
 			wantExpected: false,
 			wantErr:      false,
 		},
+		{
+			name: "the data points is empty",
+			metricsAnalyzer: &metricsAnalyzer{
+				id: "id",
+				cfg: config.AnalysisMetrics{
+					Provider: "provider",
+					Query:    "query",
+				},
+				provider:     &fakeMetricsProvider{},
+				logger:       zap.NewNop(),
+				logPersister: &fakeLogPersister{},
+			},
+			args: args{
+				experiment: nil,
+				control:    nil,
+				deviation:  "EITHER",
+			},
+			wantExpected: true,
+			wantErr:      false,
+		},
 	}
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
-			got, err := compare(tc.args.experiment, tc.args.control, tc.args.deviation)
+			got, err := tc.metricsAnalyzer.compare(tc.args.experiment, tc.args.control, tc.args.deviation)
 			assert.Equal(t, tc.wantErr, err != nil)
 			assert.Equal(t, tc.wantExpected, got)
 		})
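To run just these cases locally, something like the following should work (the package path comes from the diff header above):

	go test ./pkg/app/piped/executor/analysis -run Test_metricsAnalyzer_compare -v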