// Copyright 2022 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpc
import (
"context"
"encoding/hex"
"sort"
"testing"
"time"
"cloud.google.com/go/bigquery"
. "github.com/smartystreets/goconvey/convey"
"google.golang.org/protobuf/types/known/timestamppb"
"go.chromium.org/luci/analysis/internal/analysis"
"go.chromium.org/luci/analysis/internal/bugs"
"go.chromium.org/luci/analysis/internal/clustering"
"go.chromium.org/luci/analysis/internal/clustering/algorithms"
"go.chromium.org/luci/analysis/internal/clustering/algorithms/failurereason"
"go.chromium.org/luci/analysis/internal/clustering/algorithms/rulesalgorithm"
"go.chromium.org/luci/analysis/internal/clustering/algorithms/testname"
"go.chromium.org/luci/analysis/internal/clustering/rules"
"go.chromium.org/luci/analysis/internal/clustering/runs"
"go.chromium.org/luci/analysis/internal/config"
"go.chromium.org/luci/analysis/internal/config/compiledcfg"
"go.chromium.org/luci/analysis/internal/perms"
"go.chromium.org/luci/analysis/internal/testutil"
"go.chromium.org/luci/analysis/pbutil"
configpb "go.chromium.org/luci/analysis/proto/config"
pb "go.chromium.org/luci/analysis/proto/v1"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
. "go.chromium.org/luci/common/testing/assertions"
"go.chromium.org/luci/gae/impl/memory"
"go.chromium.org/luci/resultdb/rdbperms"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/auth/authtest"
"go.chromium.org/luci/server/auth/realms"
"go.chromium.org/luci/server/caching"
"go.chromium.org/luci/server/secrets"
"go.chromium.org/luci/server/secrets/testsecrets"
)
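// TestClusters exercises the Clusters RPC server: authorisation checks,
// request validation, and the Cluster, BatchGet, QueryClusterSummaries,
// GetReclusteringProgress, QueryClusterFailures and
// QueryExoneratedTestVariants methods.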
func TestClusters(t *testing.T) {
Convey("With a clusters server", t, func() {
ctx := testutil.IntegrationTestContext(t)
ctx = caching.WithEmptyProcessCache(ctx)
// For user identification.
ctx = authtest.MockAuthConfig(ctx)
authState := &authtest.FakeState{
Identity: "user:someone@example.com",
IdentityGroups: []string{"luci-analysis-access"},
}
ctx = auth.WithState(ctx, authState)
ctx = secrets.Use(ctx, &testsecrets.Store{})
// Provides datastore implementation needed for project config.
ctx = memory.Use(ctx)
analysisClient := newFakeAnalysisClient()
server := NewClustersServer(analysisClient)
configVersion := time.Date(2025, time.August, 12, 0, 1, 2, 3, time.UTC)
projectCfg := config.CreatePlaceholderProjectConfig()
projectCfg.LastUpdated = timestamppb.New(configVersion)
projectCfg.Monorail.DisplayPrefix = "crbug.com"
projectCfg.Monorail.MonorailHostname = "bugs.chromium.org"
configs := make(map[string]*configpb.ProjectConfig)
configs["testproject"] = projectCfg
err := config.SetTestProjectConfig(ctx, configs)
So(err, ShouldBeNil)
compiledTestProjectCfg, err := compiledcfg.NewConfig(projectCfg)
So(err, ShouldBeNil)
// Rules version is in microsecond granularity, consistent with
// the granularity of Spanner commit timestamps.
rulesVersion := time.Date(2021, time.February, 12, 1, 2, 4, 5000, time.UTC)
rs := []*rules.FailureAssociationRule{
rules.NewRule(0).
WithProject("testproject").
WithRuleDefinition(`test LIKE "%TestSuite.TestName%"`).
WithPredicateLastUpdated(rulesVersion.Add(-1 * time.Hour)).
WithBug(bugs.BugID{
System: "monorail",
ID: "chromium/7654321",
}).Build(),
rules.NewRule(1).
WithProject("testproject").
WithRuleDefinition(`reason LIKE "my_file.cc(%): Check failed: false."`).
WithPredicateLastUpdated(rulesVersion).
WithBug(bugs.BugID{
System: "buganizer",
ID: "82828282",
}).Build(),
rules.NewRule(2).
WithProject("testproject").
WithRuleDefinition(`test LIKE "%Other%"`).
WithPredicateLastUpdated(rulesVersion.Add(-2 * time.Hour)).
WithBug(bugs.BugID{
System: "monorail",
ID: "chromium/912345",
}).Build(),
}
err = rules.SetRulesForTesting(ctx, rs)
So(err, ShouldBeNil)
Convey("Unauthorised requests are rejected", func() {
// Ensure the caller is not a member of luci-analysis-access.
ctx = auth.WithState(ctx, &authtest.FakeState{
Identity: "user:someone@example.com",
// Not a member of luci-analysis-access.
IdentityGroups: []string{"other-group"},
})
// Make some request (the request should not matter, as
// a common decorator is used for all requests).
request := &pb.ClusterRequest{
Project: "testproject",
}
rule, err := server.Cluster(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "not a member of luci-analysis-access")
So(rule, ShouldBeNil)
})
Convey("Cluster", func() {
authState.IdentityPermissions = []authtest.RealmPermission{
{
Realm: "testproject:@root",
Permission: perms.PermGetClustersByFailure,
},
{
Realm: "testproject:@root",
Permission: perms.PermGetRule,
},
}
request := &pb.ClusterRequest{
Project: "testproject",
TestResults: []*pb.ClusterRequest_TestResult{
{
RequestTag: "my tag 1",
TestId: "ninja://chrome/test:interactive_ui_tests/TestSuite.TestName",
FailureReason: &pb.FailureReason{
PrimaryErrorMessage: "my_file.cc(123): Check failed: false.",
},
},
{
RequestTag: "my tag 2",
TestId: "Other_test",
},
},
}
Convey("Not authorised to cluster", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetClustersByFailure)
response, err := server.Cluster(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.getByFailure")
So(response, ShouldBeNil)
})
Convey("Not authorised to get rule", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetRule)
response, err := server.Cluster(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.rules.get")
So(response, ShouldBeNil)
})
Convey("With a valid request", func() {
// Run
response, err := server.Cluster(ctx, request)
// Verify
So(err, ShouldBeNil)
So(response, ShouldResembleProto, &pb.ClusterResponse{
ClusteredTestResults: []*pb.ClusterResponse_ClusteredTestResult{
{
RequestTag: "my tag 1",
Clusters: sortClusterEntries([]*pb.ClusterResponse_ClusteredTestResult_ClusterEntry{
{
ClusterId: &pb.ClusterId{
Algorithm: "rules",
Id: rs[0].RuleID,
},
Bug: &pb.AssociatedBug{
System: "monorail",
Id: "chromium/7654321",
LinkText: "crbug.com/7654321",
Url: "https://bugs.chromium.org/p/chromium/issues/detail?id=7654321",
},
}, {
ClusterId: &pb.ClusterId{
Algorithm: "rules",
Id: rs[1].RuleID,
},
Bug: &pb.AssociatedBug{
System: "buganizer",
Id: "82828282",
LinkText: "b/82828282",
Url: "https://issuetracker.google.com/issues/82828282",
},
},
failureReasonClusterEntry(compiledTestProjectCfg, "my_file.cc(123): Check failed: false."),
testNameClusterEntry(compiledTestProjectCfg, "ninja://chrome/test:interactive_ui_tests/TestSuite.TestName"),
}),
},
{
RequestTag: "my tag 2",
Clusters: sortClusterEntries([]*pb.ClusterResponse_ClusteredTestResult_ClusterEntry{
{
ClusterId: &pb.ClusterId{
Algorithm: "rules",
Id: rs[2].RuleID,
},
Bug: &pb.AssociatedBug{
System: "monorail",
Id: "chromium/912345",
LinkText: "crbug.com/912345",
Url: "https://bugs.chromium.org/p/chromium/issues/detail?id=912345",
},
},
testNameClusterEntry(compiledTestProjectCfg, "Other_test"),
}),
},
},
ClusteringVersion: &pb.ClusteringVersion{
AlgorithmsVersion: algorithms.AlgorithmsVersion,
RulesVersion: timestamppb.New(rulesVersion),
ConfigVersion: timestamppb.New(configVersion),
},
})
})
Convey("With missing test ID", func() {
request.TestResults[1].TestId = ""
// Run
response, err := server.Cluster(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "test result 1: test ID must not be empty")
})
Convey("With too many test results", func() {
var testResults []*pb.ClusterRequest_TestResult
for i := 0; i < 1001; i++ {
testResults = append(testResults, &pb.ClusterRequest_TestResult{
TestId: "AnotherTest",
})
}
request.TestResults = testResults
// Run
response, err := server.Cluster(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "too many test results: at most 1000 test results can be clustered in one request")
})
Convey("With project not configured", func() {
err := config.SetTestProjectConfig(ctx, map[string]*configpb.ProjectConfig{})
So(err, ShouldBeNil)
// Run
response, err := server.Cluster(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCFailedPrecondition, "project does not exist in LUCI Analysis")
})
})
Convey("BatchGet", func() {
authState.IdentityPermissions = []authtest.RealmPermission{
{
Realm: "testproject:@root",
Permission: perms.PermGetCluster,
},
{
Realm: "testproject:@root",
Permission: perms.PermExpensiveClusterQueries,
},
{
Realm: "testproject:realm1",
Permission: rdbperms.PermListTestResults,
},
{
Realm: "testproject:realm3",
Permission: rdbperms.PermListTestResults,
},
}
example := &clustering.Failure{
TestID: "TestID_Example",
Reason: &pb.FailureReason{
PrimaryErrorMessage: "Example failure reason 123.",
},
}
a := &failurereason.Algorithm{}
reasonClusterID := a.Cluster(compiledTestProjectCfg, example)
analysisClient.clustersByProject["testproject"] = []*analysis.Cluster{
{
ClusterID: clustering.ClusterID{
Algorithm: rulesalgorithm.AlgorithmName,
ID: "11111100000000000000000000000000",
},
PresubmitRejects1d: analysis.Counts{Nominal: 1},
PresubmitRejects3d: analysis.Counts{Nominal: 2},
PresubmitRejects7d: analysis.Counts{Nominal: 3},
CriticalFailuresExonerated1d: analysis.Counts{Nominal: 4},
CriticalFailuresExonerated3d: analysis.Counts{Nominal: 5},
CriticalFailuresExonerated7d: analysis.Counts{Nominal: 6},
Failures1d: analysis.Counts{Nominal: 7},
Failures3d: analysis.Counts{Nominal: 8},
Failures7d: analysis.Counts{Nominal: 9},
DistinctUserCLsWithFailures7d: analysis.Counts{Nominal: 10},
PostsubmitBuildsWithFailures7d: analysis.Counts{Nominal: 11},
ExampleFailureReason: bigquery.NullString{Valid: true, StringVal: "Example failure reason."},
TopTestIDs: []analysis.TopCount{
{Value: "TestID 1", Count: 2},
{Value: "TestID 2", Count: 1},
},
Realms: []string{"testproject:realm1", "testproject:realm2"},
},
{
ClusterID: clustering.ClusterID{
Algorithm: testname.AlgorithmName,
ID: "cccccc00000000000000000000000001",
},
PresubmitRejects7d: analysis.Counts{Nominal: 11},
ExampleFailureReason: bigquery.NullString{Valid: true, StringVal: "Example failure reason 2."},
TopTestIDs: []analysis.TopCount{
{Value: "TestID 3", Count: 2},
},
Realms: []string{"testproject:realm2", "testproject:realm3"},
},
{
ClusterID: clustering.ClusterID{
Algorithm: failurereason.AlgorithmName,
ID: hex.EncodeToString(reasonClusterID),
},
PresubmitRejects7d: analysis.Counts{Nominal: 15},
ExampleFailureReason: bigquery.NullString{Valid: true, StringVal: "Example failure reason 123."},
TopTestIDs: []analysis.TopCount{
{Value: "TestID_Example", Count: 10},
},
Realms: []string{"testproject:realm1", "testproject:realm3"},
},
}
request := &pb.BatchGetClustersRequest{
Parent: "projects/testproject",
Names: []string{
// Rule for which data exists.
"projects/testproject/clusters/rules/11111100000000000000000000000000",
// Rule for which no data exists.
"projects/testproject/clusters/rules/1111110000000000000000000000ffff",
// Suggested cluster for which cluster ID matches the example
// provided for the cluster.
"projects/testproject/clusters/" + failurereason.AlgorithmName + "/" + hex.EncodeToString(reasonClusterID),
// Suggested cluster for which data exists, but whose cluster ID
// does not match the example provided for the cluster. This could
// be because configuration has changed and re-clustering is not
// yet complete.
"projects/testproject/clusters/" + testname.AlgorithmName + "/cccccc00000000000000000000000001",
// Suggested cluster for which no impact data exists.
"projects/testproject/clusters/reason-v3/cccccc0000000000000000000000ffff",
},
}
expectedResponse := &pb.BatchGetClustersResponse{
Clusters: []*pb.Cluster{
{
Name: "projects/testproject/clusters/rules/11111100000000000000000000000000",
HasExample: true,
UserClsFailedPresubmit: &pb.Cluster_ImpactValues{
OneDay: &pb.Cluster_Counts{Nominal: 1},
ThreeDay: &pb.Cluster_Counts{Nominal: 2},
SevenDay: &pb.Cluster_Counts{Nominal: 3},
},
CriticalFailuresExonerated: &pb.Cluster_ImpactValues{
OneDay: &pb.Cluster_Counts{Nominal: 4},
ThreeDay: &pb.Cluster_Counts{Nominal: 5},
SevenDay: &pb.Cluster_Counts{Nominal: 6},
},
Failures: &pb.Cluster_ImpactValues{
OneDay: &pb.Cluster_Counts{Nominal: 7},
ThreeDay: &pb.Cluster_Counts{Nominal: 8},
SevenDay: &pb.Cluster_Counts{Nominal: 9},
},
UserClsWithFailures: &pb.Cluster_Counts{Nominal: 10},
PostsubmitBuildsWithFailures: &pb.Cluster_Counts{Nominal: 11},
},
{
Name: "projects/testproject/clusters/rules/1111110000000000000000000000ffff",
HasExample: false,
UserClsFailedPresubmit: emptyMetricValues(),
CriticalFailuresExonerated: emptyMetricValues(),
Failures: emptyMetricValues(),
UserClsWithFailures: &pb.Cluster_Counts{},
PostsubmitBuildsWithFailures: &pb.Cluster_Counts{},
},
{
Name: "projects/testproject/clusters/" + failurereason.AlgorithmName + "/" + hex.EncodeToString(reasonClusterID),
Title: "Example failure reason %.",
HasExample: true,
UserClsFailedPresubmit: &pb.Cluster_ImpactValues{
OneDay: &pb.Cluster_Counts{},
ThreeDay: &pb.Cluster_Counts{},
SevenDay: &pb.Cluster_Counts{Nominal: 15},
},
CriticalFailuresExonerated: emptyMetricValues(),
Failures: emptyMetricValues(),
UserClsWithFailures: &pb.Cluster_Counts{},
PostsubmitBuildsWithFailures: &pb.Cluster_Counts{},
EquivalentFailureAssociationRule: `reason LIKE "Example failure reason %."`,
},
{
Name: "projects/testproject/clusters/" + testname.AlgorithmName + "/cccccc00000000000000000000000001",
Title: "(definition unavailable due to ongoing reclustering)",
HasExample: true,
UserClsFailedPresubmit: &pb.Cluster_ImpactValues{
OneDay: &pb.Cluster_Counts{},
ThreeDay: &pb.Cluster_Counts{},
SevenDay: &pb.Cluster_Counts{Nominal: 11},
},
CriticalFailuresExonerated: emptyMetricValues(),
Failures: emptyMetricValues(),
UserClsWithFailures: &pb.Cluster_Counts{},
PostsubmitBuildsWithFailures: &pb.Cluster_Counts{},
EquivalentFailureAssociationRule: ``,
},
{
Name: "projects/testproject/clusters/reason-v3/cccccc0000000000000000000000ffff",
HasExample: false,
UserClsFailedPresubmit: emptyMetricValues(),
CriticalFailuresExonerated: emptyMetricValues(),
Failures: emptyMetricValues(),
UserClsWithFailures: &pb.Cluster_Counts{},
PostsubmitBuildsWithFailures: &pb.Cluster_Counts{},
},
},
}
Convey("Not authorised to get cluster", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetCluster)
response, err := server.BatchGet(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.get")
So(response, ShouldBeNil)
})
Convey("Not authorised to perform expensive queries", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermExpensiveClusterQueries)
response, err := server.BatchGet(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.expensiveQueries")
So(response, ShouldBeNil)
})
Convey("With a valid request", func() {
Convey("No duplicate requests", func() {
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
Convey("No test result list permission", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestResults)
// Run
response, err := server.BatchGet(ctx, request)
// Verify
for _, r := range expectedResponse.Clusters {
r.Title = ""
r.EquivalentFailureAssociationRule = ""
}
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
Convey("Duplicate requests", func() {
// Even if request items are duplicated, the request
// should still succeed and return correct results.
request.Names = append(request.Names, request.Names...)
expectedResponse.Clusters = append(expectedResponse.Clusters, expectedResponse.Clusters...)
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
})
Convey("With invalid request", func() {
Convey("Invalid parent", func() {
request.Parent = "blah"
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid project name, expected format: projects/{project}")
})
Convey("No names specified", func() {
request.Names = []string{}
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "names must be specified")
})
Convey("Parent does not match request items", func() {
// Request asks for project "blah" but parent asks for
// project "testproject".
So(request.Parent, ShouldEqual, "projects/testproject")
request.Names[1] = "projects/blah/clusters/reason-v3/cccccc00000000000000000000000001"
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, `name 1: project must match parent project ("testproject")`)
})
Convey("Invalid name", func() {
request.Names[1] = "invalid"
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "name 1: invalid cluster name, expected format: projects/{project}/clusters/{cluster_alg}/{cluster_id}")
})
Convey("Invalid cluster algorithm in name", func() {
request.Names[1] = "projects/blah/clusters/reason/cccccc00000000000000000000000001"
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "name 1: invalid cluster identity: algorithm not valid")
})
Convey("Invalid cluster ID in name", func() {
request.Names[1] = "projects/blah/clusters/reason-v3/123"
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "name 1: invalid cluster identity: ID is not valid lowercase hexadecimal bytes")
})
Convey("Too many request items", func() {
var names []string
for i := 0; i < 1001; i++ {
names = append(names, "projects/testproject/clusters/rules/11111100000000000000000000000000")
}
request.Names = names
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "too many names: at most 1000 clusters can be retrieved in one request")
})
Convey("Dataset does not exist", func() {
delete(analysisClient.clustersByProject, "testproject")
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCNotFound, "LUCI Analysis BigQuery dataset not provisioned for project or cluster analysis is not yet available")
})
Convey("With project not configured", func() {
err := config.SetTestProjectConfig(ctx, map[string]*configpb.ProjectConfig{})
So(err, ShouldBeNil)
// Run
response, err := server.BatchGet(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCFailedPrecondition, "project does not exist in LUCI Analysis")
})
})
})
Convey("QueryClusterSummaries", func() {
authState.IdentityPermissions = listTestResultsPermissions(
"testproject:realm1",
"testproject:realm2",
"otherproject:realm3",
)
authState.IdentityPermissions = append(authState.IdentityPermissions, []authtest.RealmPermission{
{
Realm: "testproject:@root",
Permission: perms.PermListClusters,
},
{
Realm: "testproject:@root",
Permission: perms.PermExpensiveClusterQueries,
},
{
Realm: "testproject:@root",
Permission: perms.PermGetRule,
},
{
Realm: "testproject:@root",
Permission: perms.PermGetRuleDefinition,
},
}...)
analysisClient.clusterMetricsByProject["testproject"] = []*analysis.ClusterSummary{
{
ClusterID: clustering.ClusterID{
Algorithm: rulesalgorithm.AlgorithmName,
ID: rs[0].RuleID,
},
PresubmitRejects: 1,
CriticalFailuresExonerated: 2,
Failures: 3,
ExampleFailureReason: bigquery.NullString{Valid: true, StringVal: "Example failure reason."},
ExampleTestID: "TestID 1",
},
{
ClusterID: clustering.ClusterID{
Algorithm: "reason-v3",
ID: "cccccc00000000000000000000000001",
},
PresubmitRejects: 4,
CriticalFailuresExonerated: 5,
Failures: 6,
ExampleFailureReason: bigquery.NullString{Valid: true, StringVal: "Example failure reason 2."},
ExampleTestID: "TestID 3",
},
{
ClusterID: clustering.ClusterID{
// Rule that is no longer active.
Algorithm: rulesalgorithm.AlgorithmName,
ID: "01234567890abcdef01234567890abcdef",
},
PresubmitRejects: 7,
CriticalFailuresExonerated: 8,
Failures: 9,
ExampleFailureReason: bigquery.NullString{Valid: true, StringVal: "Example failure reason."},
ExampleTestID: "TestID 1",
},
}
analysisClient.expectedRealmsQueried = []string{"testproject:realm1", "testproject:realm2"}
request := &pb.QueryClusterSummariesRequest{
Project: "testproject",
FailureFilter: "test_id:\"pita.Boot\" failure_reason:\"failed to boot\"",
OrderBy: "presubmit_rejects desc, critical_failures_exonerated, failures desc",
}
Convey("Not authorised to list clusters", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermListClusters)
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.list")
So(response, ShouldBeNil)
})
Convey("Not authorised to perform expensive queries", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermExpensiveClusterQueries)
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.expensiveQueries")
So(response, ShouldBeNil)
})
Convey("Not authorised to get rules", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetRule)
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.rules.get")
So(response, ShouldBeNil)
})
Convey("Not authorised to list test results in any realm", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestResults)
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permissions [resultdb.testResults.list resultdb.testExonerations.list] in any realm in project \"testproject\"")
So(response, ShouldBeNil)
})
Convey("Not authorised to list test exonerations in any realm", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestExonerations)
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permissions [resultdb.testResults.list resultdb.testExonerations.list] in any realm in project \"testproject\"")
So(response, ShouldBeNil)
})
Convey("Valid request", func() {
expectedResponse := &pb.QueryClusterSummariesResponse{
ClusterSummaries: []*pb.ClusterSummary{
{
ClusterId: &pb.ClusterId{
Algorithm: "rules",
Id: rs[0].RuleID,
},
Title: rs[0].RuleDefinition,
Bug: &pb.AssociatedBug{
System: "monorail",
Id: "chromium/7654321",
LinkText: "crbug.com/7654321",
Url: "https://bugs.chromium.org/p/chromium/issues/detail?id=7654321",
},
PresubmitRejects: 1,
CriticalFailuresExonerated: 2,
Failures: 3,
},
{
ClusterId: &pb.ClusterId{
Algorithm: "reason-v3",
Id: "cccccc00000000000000000000000001",
},
Title: `Example failure reason 2.`,
PresubmitRejects: 4,
CriticalFailuresExonerated: 5,
Failures: 6,
},
{
ClusterId: &pb.ClusterId{
Algorithm: "rules",
Id: "01234567890abcdef01234567890abcdef",
},
Title: `(rule archived)`,
PresubmitRejects: 7,
CriticalFailuresExonerated: 8,
Failures: 9,
},
},
}
Convey("With filters and order by", func() {
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
Convey("Without filters or order", func() {
request.FailureFilter = ""
request.OrderBy = ""
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
Convey("Without rule definition get permission", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetRuleDefinition)
// The RPC cannot return the rule definition as the
// cluster title because the user is not authorised to see it.
// Instead, it should generate a description of the
// contents of the cluster based on what the user can see.
expectedResponse.ClusterSummaries[0].Title = "Selected failures in TestID 1"
response, err := server.QueryClusterSummaries(ctx, request)
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
})
Convey("Invalid request", func() {
Convey("Dataset does not exist", func() {
delete(analysisClient.clusterMetricsByProject, "testproject")
// Run
response, err := server.QueryClusterSummaries(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCNotFound, "LUCI Analysis BigQuery dataset not provisioned for project or cluster analysis is not yet available")
})
Convey("Failure filter syntax is invalid", func() {
request.FailureFilter = "test_id::"
// Run
response, err := server.QueryClusterSummaries(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "failure_filter: expected arg after :")
})
Convey("Failure filter references non-existant column", func() {
request.FailureFilter = `test:"pita.Boot"`
// Run
response, err := server.QueryClusterSummaries(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, `failure_filter: no filterable field named "test"`)
})
Convey("Failure filter references unimplemented feature", func() {
request.FailureFilter = "test<=\"blah\""
// Run
response, err := server.QueryClusterSummaries(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "failure_filter: comparator operator not implemented yet")
})
Convey("Order by syntax invalid", func() {
request.OrderBy = "presubmit_rejects asc"
// Run
response, err := server.QueryClusterSummaries(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, `order_by: invalid ordering "presubmit_rejects asc"`)
})
Convey("Order by syntax references invalid column", func() {
request.OrderBy = "not_exists desc"
// Run
response, err := server.QueryClusterSummaries(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, `order_by: no sortable field named "not_exists"`)
})
})
})
Convey("GetReclusteringProgress", func() {
authState.IdentityPermissions = []authtest.RealmPermission{{
Realm: "testproject:@root",
Permission: perms.PermGetCluster,
}}
request := &pb.GetReclusteringProgressRequest{
Name: "projects/testproject/reclusteringProgress",
}
Convey("Not authorised to get cluster", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetCluster)
response, err := server.GetReclusteringProgress(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.get")
So(response, ShouldBeNil)
})
Convey("With a valid request", func() {
rulesVersion := time.Date(2021, time.January, 1, 1, 0, 0, 0, time.UTC)
reference := time.Date(2020, time.February, 1, 1, 0, 0, 0, time.UTC)
configVersion := time.Date(2019, time.March, 1, 1, 0, 0, 0, time.UTC)
rns := []*runs.ReclusteringRun{
runs.NewRun(0).
WithProject("testproject").
WithAttemptTimestamp(reference.Add(-5 * time.Minute)).
WithRulesVersion(rulesVersion).
WithAlgorithmsVersion(2).
WithConfigVersion(configVersion).
WithNoReportedProgress().
Build(),
runs.NewRun(1).
WithProject("testproject").
WithAttemptTimestamp(reference.Add(-10 * time.Minute)).
WithRulesVersion(rulesVersion).
WithAlgorithmsVersion(2).
WithConfigVersion(configVersion).
WithReportedProgress(500).
Build(),
runs.NewRun(2).
WithProject("testproject").
WithAttemptTimestamp(reference.Add(-20 * time.Minute)).
WithRulesVersion(rulesVersion.Add(-1 * time.Hour)).
WithAlgorithmsVersion(1).
WithConfigVersion(configVersion.Add(-1 * time.Hour)).
WithCompletedProgress().
Build(),
}
err := runs.SetRunsForTesting(ctx, rns)
So(err, ShouldBeNil)
// Run
response, err := server.GetReclusteringProgress(ctx, request)
// Verify.
So(err, ShouldBeNil)
So(response, ShouldResembleProto, &pb.ReclusteringProgress{
Name: "projects/testproject/reclusteringProgress",
ProgressPerMille: 500,
Last: &pb.ClusteringVersion{
AlgorithmsVersion: 1,
ConfigVersion: timestamppb.New(configVersion.Add(-1 * time.Hour)),
RulesVersion: timestamppb.New(rulesVersion.Add(-1 * time.Hour)),
},
Next: &pb.ClusteringVersion{
AlgorithmsVersion: 2,
ConfigVersion: timestamppb.New(configVersion),
RulesVersion: timestamppb.New(rulesVersion),
},
})
})
Convey("With an invalid request", func() {
Convey("Invalid name", func() {
request.Name = "invalid"
// Run
response, err := server.GetReclusteringProgress(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "name: invalid reclustering progress name, expected format: projects/{project}/reclusteringProgress")
})
})
})
Convey("QueryClusterFailures", func() {
authState.IdentityPermissions = listTestResultsPermissions(
"testproject:realm1",
"testproject:realm2",
"otherproject:realm3",
)
authState.IdentityPermissions = append(authState.IdentityPermissions, authtest.RealmPermission{
Realm: "testproject:@root",
Permission: perms.PermGetCluster,
}, authtest.RealmPermission{
Realm: "testproject:@root",
Permission: perms.PermExpensiveClusterQueries,
})
request := &pb.QueryClusterFailuresRequest{
Parent: "projects/testproject/clusters/reason-v1/cccccc00000000000000000000000001/failures",
}
Convey("Not authorised to get cluster", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetCluster)
response, err := server.QueryClusterFailures(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.get")
So(response, ShouldBeNil)
})
Convey("Not authorised to perform expensive queries", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermExpensiveClusterQueries)
response, err := server.QueryClusterFailures(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.expensiveQueries")
So(response, ShouldBeNil)
})
Convey("Not authorised to list test results in any realm", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestResults)
response, err := server.QueryClusterFailures(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permissions [resultdb.testResults.list resultdb.testExonerations.list] in any realm in project \"testproject\"")
So(response, ShouldBeNil)
})
Convey("Not authorised to list test exonerations in any realm", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestExonerations)
response, err := server.QueryClusterFailures(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permissions [resultdb.testResults.list resultdb.testExonerations.list] in any realm in project \"testproject\"")
So(response, ShouldBeNil)
})
Convey("With a valid request", func() {
analysisClient.expectedRealmsQueried = []string{"testproject:realm1", "testproject:realm2"}
analysisClient.failuresByProjectAndCluster["testproject"] = map[clustering.ClusterID][]*analysis.ClusterFailure{
{
Algorithm: "reason-v1",
ID: "cccccc00000000000000000000000001",
}: {
{
TestID: bqString("testID-1"),
Variant: []*analysis.Variant{
{
Key: bqString("key1"),
Value: bqString("value1"),
},
{
Key: bqString("key2"),
Value: bqString("value2"),
},
},
PresubmitRunID: &analysis.PresubmitRunID{
System: bqString("luci-cv"),
ID: bqString("123456789"),
},
PresubmitRunOwner: bqString("user"),
PresubmitRunMode: bqString(analysis.ToBQPresubmitRunMode(pb.PresubmitRunMode_QUICK_DRY_RUN)),
Changelists: []*analysis.Changelist{
{
Host: bqString("testproject.googlesource.com"),
Change: bigquery.NullInt64{Int64: 100006, Valid: true},
Patchset: bigquery.NullInt64{Int64: 106, Valid: true},
OwnerKind: bqString(pb.ChangelistOwnerKind_AUTOMATION.String()),
},
{
Host: bqString("testproject-internal.googlesource.com"),
Change: bigquery.NullInt64{Int64: 100007, Valid: true},
Patchset: bigquery.NullInt64{Int64: 107, Valid: true},
OwnerKind: bqString(pb.ChangelistOwnerKind_HUMAN.String()),
},
},
PartitionTime: bigquery.NullTimestamp{Timestamp: time.Date(2123, time.April, 1, 2, 3, 4, 5, time.UTC), Valid: true},
Exonerations: []*analysis.Exoneration{
{
Reason: bqString(pb.ExonerationReason_OCCURS_ON_MAINLINE.String()),
},
{
Reason: bqString(pb.ExonerationReason_NOT_CRITICAL.String()),
},
},
BuildStatus: bqString(analysis.ToBQBuildStatus(pb.BuildStatus_BUILD_STATUS_FAILURE)),
IsBuildCritical: bigquery.NullBool{Bool: true, Valid: true},
IngestedInvocationID: bqString("build-1234567890"),
IsIngestedInvocationBlocked: bigquery.NullBool{Bool: true, Valid: true},
Count: 15,
},
{
TestID: bigquery.NullString{StringVal: "testID-2"},
Variant: []*analysis.Variant{
{
Key: bqString("key1"),
Value: bqString("value2"),
},
{
Key: bqString("key3"),
Value: bqString("value3"),
},
},
PresubmitRunID: nil,
PresubmitRunOwner: bigquery.NullString{},
PresubmitRunMode: bigquery.NullString{},
Changelists: nil,
PartitionTime: bigquery.NullTimestamp{Timestamp: time.Date(2124, time.May, 2, 3, 4, 5, 6, time.UTC), Valid: true},
BuildStatus: bqString(analysis.ToBQBuildStatus(pb.BuildStatus_BUILD_STATUS_CANCELED)),
IsBuildCritical: bigquery.NullBool{},
IngestedInvocationID: bqString("build-9888887771"),
IsIngestedInvocationBlocked: bigquery.NullBool{Bool: true, Valid: true},
Count: 1,
},
},
}
expectedResponse := &pb.QueryClusterFailuresResponse{
Failures: []*pb.DistinctClusterFailure{
{
TestId: "testID-1",
Variant: pbutil.Variant("key1", "value1", "key2", "value2"),
PartitionTime: timestamppb.New(time.Date(2123, time.April, 1, 2, 3, 4, 5, time.UTC)),
PresubmitRun: &pb.DistinctClusterFailure_PresubmitRun{
PresubmitRunId: &pb.PresubmitRunId{
System: "luci-cv",
Id: "123456789",
},
Owner: "user",
Mode: pb.PresubmitRunMode_QUICK_DRY_RUN,
},
IsBuildCritical: true,
Exonerations: []*pb.DistinctClusterFailure_Exoneration{{
Reason: pb.ExonerationReason_OCCURS_ON_MAINLINE,
}, {
Reason: pb.ExonerationReason_NOT_CRITICAL,
}},
BuildStatus: pb.BuildStatus_BUILD_STATUS_FAILURE,
IngestedInvocationId: "build-1234567890",
IsIngestedInvocationBlocked: true,
Changelists: []*pb.Changelist{
{
Host: "testproject.googlesource.com",
Change: 100006,
Patchset: 106,
OwnerKind: pb.ChangelistOwnerKind_AUTOMATION,
},
{
Host: "testproject-internal.googlesource.com",
Change: 100007,
Patchset: 107,
OwnerKind: pb.ChangelistOwnerKind_HUMAN,
},
},
Count: 15,
},
{
TestId: "testID-2",
Variant: pbutil.Variant("key1", "value2", "key3", "value3"),
PartitionTime: timestamppb.New(time.Date(2124, time.May, 2, 3, 4, 5, 6, time.UTC)),
PresubmitRun: nil,
IsBuildCritical: false,
Exonerations: nil,
BuildStatus: pb.BuildStatus_BUILD_STATUS_CANCELED,
IngestedInvocationId: "build-9888887771",
IsIngestedInvocationBlocked: true,
Count: 1,
},
},
}
// Run
response, err := server.QueryClusterFailures(ctx, request)
// Verify.
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
Convey("With an invalid request", func() {
Convey("Invalid parent", func() {
request.Parent = "blah"
// Run
response, err := server.QueryClusterFailures(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid cluster failures name, expected format: projects/{project}/clusters/{cluster_alg}/{cluster_id}/failures")
})
Convey("Invalid cluster algorithm in parent", func() {
request.Parent = "projects/blah/clusters/reason/cccccc00000000000000000000000001/failures"
// Run
response, err := server.QueryClusterFailures(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid cluster identity: algorithm not valid")
})
Convey("Invalid cluster ID in parent", func() {
request.Parent = "projects/blah/clusters/reason-v3/123/failures"
// Run
response, err := server.QueryClusterFailures(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid cluster identity: ID is not valid lowercase hexadecimal bytes")
})
Convey("Dataset does not exist", func() {
delete(analysisClient.clustersByProject, "testproject")
// Run
response, err := server.QueryClusterFailures(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCNotFound, "LUCI Analysis BigQuery dataset not provisioned for project or clustered failures not yet available")
})
})
})
Convey("QueryExoneratedTestVariants", func() {
authState.IdentityPermissions = listTestResultsPermissions(
"testproject:realm1",
"testproject:realm2",
"otherproject:realm3",
)
authState.IdentityPermissions = append(authState.IdentityPermissions, authtest.RealmPermission{
Realm: "testproject:@root",
Permission: perms.PermGetCluster,
}, authtest.RealmPermission{
Realm: "testproject:@root",
Permission: perms.PermExpensiveClusterQueries,
})
request := &pb.QueryClusterExoneratedTestVariantsRequest{
Parent: "projects/testproject/clusters/reason-v1/cccccc00000000000000000000000001/exoneratedTestVariants",
}
Convey("Not authorised to get cluster", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermGetCluster)
response, err := server.QueryExoneratedTestVariants(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.get")
So(response, ShouldBeNil)
})
Convey("Not authorised to perform expensive queries", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, perms.PermExpensiveClusterQueries)
response, err := server.QueryExoneratedTestVariants(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permission analysis.clusters.expensiveQueries")
So(response, ShouldBeNil)
})
Convey("Not authorised to list test results in any realm", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestResults)
response, err := server.QueryExoneratedTestVariants(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permissions [resultdb.testResults.list resultdb.testExonerations.list] in any realm in project \"testproject\"")
So(response, ShouldBeNil)
})
Convey("Not authorised to list test exonerations in any realm", func() {
authState.IdentityPermissions = removePermission(authState.IdentityPermissions, rdbperms.PermListTestExonerations)
response, err := server.QueryExoneratedTestVariants(ctx, request)
So(err, ShouldBeRPCPermissionDenied, "caller does not have permissions [resultdb.testResults.list resultdb.testExonerations.list] in any realm in project \"testproject\"")
So(response, ShouldBeNil)
})
Convey("With a valid request", func() {
analysisClient.expectedRealmsQueried = []string{"testproject:realm1", "testproject:realm2"}
analysisClient.exoneratedTVsByProjectAndCluster["testproject"] = map[clustering.ClusterID][]*analysis.ExoneratedTestVariant{
{
Algorithm: "reason-v1",
ID: "cccccc00000000000000000000000001",
}: {
{
TestID: bqString("testID-1"),
Variant: []*analysis.Variant{
{
Key: bqString("key1"),
Value: bqString("value1"),
},
{
Key: bqString("key2"),
Value: bqString("value2"),
},
},
CriticalFailuresExonerated: 51,
LastExoneration: bigquery.NullTimestamp{Timestamp: time.Date(2123, time.April, 1, 2, 3, 4, 5, time.UTC), Valid: true},
},
{
TestID: bigquery.NullString{StringVal: "testID-2"},
Variant: []*analysis.Variant{
{
Key: bqString("key1"),
Value: bqString("value2"),
},
{
Key: bqString("key3"),
Value: bqString("value3"),
},
},
CriticalFailuresExonerated: 172,
LastExoneration: bigquery.NullTimestamp{Timestamp: time.Date(2124, time.May, 2, 3, 4, 5, 6, time.UTC), Valid: true},
},
},
}
expectedResponse := &pb.QueryClusterExoneratedTestVariantsResponse{
TestVariants: []*pb.ClusterExoneratedTestVariant{
{
TestId: "testID-1",
Variant: pbutil.Variant("key1", "value1", "key2", "value2"),
CriticalFailuresExonerated: 51,
LastExoneration: timestamppb.New(time.Date(2123, time.April, 1, 2, 3, 4, 5, time.UTC)),
},
{
TestId: "testID-2",
Variant: pbutil.Variant("key1", "value2", "key3", "value3"),
CriticalFailuresExonerated: 172,
LastExoneration: timestamppb.New(time.Date(2124, time.May, 2, 3, 4, 5, 6, time.UTC)),
},
},
}
// Run
response, err := server.QueryExoneratedTestVariants(ctx, request)
// Verify.
So(err, ShouldBeNil)
So(response, ShouldResembleProto, expectedResponse)
})
Convey("With an invalid request", func() {
Convey("Invalid parent", func() {
request.Parent = "blah"
// Run
response, err := server.QueryExoneratedTestVariants(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid cluster failures name, expected format: projects/{project}/clusters/{cluster_alg}/{cluster_id}/exoneratedTestVariants")
})
Convey("Invalid cluster algorithm in parent", func() {
request.Parent = "projects/blah/clusters/reason/cccccc00000000000000000000000001/exoneratedTestVariants"
// Run
response, err := server.QueryExoneratedTestVariants(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid cluster identity: algorithm not valid")
})
Convey("Invalid cluster ID in parent", func() {
request.Parent = "projects/blah/clusters/reason-v3/123/exoneratedTestVariants"
// Run
response, err := server.QueryExoneratedTestVariants(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCInvalidArgument, "parent: invalid cluster identity: ID is not valid lowercase hexadecimal bytes")
})
Convey("Dataset does not exist", func() {
delete(analysisClient.clustersByProject, "testproject")
// Run
response, err := server.QueryExoneratedTestVariants(ctx, request)
// Verify
So(response, ShouldBeNil)
So(err, ShouldBeRPCNotFound, "LUCI Analysis BigQuery dataset not provisioned for project or clustered failures not yet available")
})
})
})
})
}
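// bqString returns a valid BigQuery NullString containing the given value.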
func bqString(value string) bigquery.NullString {
return bigquery.NullString{StringVal: value, Valid: true}
}
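// listTestResultsPermissions returns ResultDB test result and test
// exoneration list permissions for each of the given realms.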
func listTestResultsPermissions(realms ...string) []authtest.RealmPermission {
var result []authtest.RealmPermission
for _, r := range realms {
result = append(result, authtest.RealmPermission{
Realm: r,
Permission: rdbperms.PermListTestResults,
})
result = append(result, authtest.RealmPermission{
Realm: r,
Permission: rdbperms.PermListTestExonerations,
})
}
return result
}
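// removePermission returns a copy of perms with all grants of the given
// permission removed.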
func removePermission(perms []authtest.RealmPermission, permission realms.Permission) []authtest.RealmPermission {
var result []authtest.RealmPermission
for _, p := range perms {
if p.Permission != permission {
result = append(result, p)
}
}
return result
}
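// emptyMetricValues returns impact values with zero counts for the one-,
// three- and seven-day windows.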
func emptyMetricValues() *pb.Cluster_ImpactValues {
return &pb.Cluster_ImpactValues{
OneDay: &pb.Cluster_Counts{},
ThreeDay: &pb.Cluster_Counts{},
SevenDay: &pb.Cluster_Counts{},
}
}
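// failureReasonClusterEntry returns the cluster entry expected for a failure
// with the given primary error message, as clustered by the failure reason
// algorithm.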
func failureReasonClusterEntry(projectcfg *compiledcfg.ProjectConfig, primaryErrorMessage string) *pb.ClusterResponse_ClusteredTestResult_ClusterEntry {
alg := &failurereason.Algorithm{}
clusterID := alg.Cluster(projectcfg, &clustering.Failure{
Reason: &pb.FailureReason{
PrimaryErrorMessage: primaryErrorMessage,
},
})
return &pb.ClusterResponse_ClusteredTestResult_ClusterEntry{
ClusterId: &pb.ClusterId{
Algorithm: failurereason.AlgorithmName,
Id: hex.EncodeToString(clusterID),
},
}
}
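// testNameClusterEntry returns the cluster entry expected for a failure
// with the given test ID, as clustered by the test name algorithm.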
func testNameClusterEntry(projectcfg *compiledcfg.ProjectConfig, testID string) *pb.ClusterResponse_ClusteredTestResult_ClusterEntry {
alg := &testname.Algorithm{}
clusterID := alg.Cluster(projectcfg, &clustering.Failure{
TestID: testID,
})
return &pb.ClusterResponse_ClusteredTestResult_ClusterEntry{
ClusterId: &pb.ClusterId{
Algorithm: testname.AlgorithmName,
Id: hex.EncodeToString(clusterID),
},
}
}
// sortClusterEntries sorts cluster entries by algorithm and then cluster ID, ascending.
func sortClusterEntries(entries []*pb.ClusterResponse_ClusteredTestResult_ClusterEntry) []*pb.ClusterResponse_ClusteredTestResult_ClusterEntry {
result := make([]*pb.ClusterResponse_ClusteredTestResult_ClusterEntry, len(entries))
copy(result, entries)
sort.Slice(result, func(i, j int) bool {
if result[i].ClusterId.Algorithm != result[j].ClusterId.Algorithm {
return result[i].ClusterId.Algorithm < result[j].ClusterId.Algorithm
}
return result[i].ClusterId.Id < result[j].ClusterId.Id
})
return result
}
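// fakeAnalysisClient is an in-memory fake of the analysis client used by the
// clusters server, with data keyed by LUCI project. expectedRealmsQueried is
// the exact set of realms the query methods are expected to be invoked with.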
type fakeAnalysisClient struct {
clustersByProject map[string][]*analysis.Cluster
failuresByProjectAndCluster map[string]map[clustering.ClusterID][]*analysis.ClusterFailure
exoneratedTVsByProjectAndCluster map[string]map[clustering.ClusterID][]*analysis.ExoneratedTestVariant
clusterMetricsByProject map[string][]*analysis.ClusterSummary
expectedRealmsQueried []string
}
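// newFakeAnalysisClient returns a fakeAnalysisClient with no data.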
func newFakeAnalysisClient() *fakeAnalysisClient {
return &fakeAnalysisClient{
clustersByProject: make(map[string][]*analysis.Cluster),
failuresByProjectAndCluster: make(map[string]map[clustering.ClusterID][]*analysis.ClusterFailure),
exoneratedTVsByProjectAndCluster: make(map[string]map[clustering.ClusterID][]*analysis.ExoneratedTestVariant),
clusterMetricsByProject: make(map[string][]*analysis.ClusterSummary),
}
}
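// ReadClusters returns the fake clusters whose IDs are in clusterIDs, or
// ProjectNotExistsErr if no dataset exists for the project.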
func (f *fakeAnalysisClient) ReadClusters(ctx context.Context, project string, clusterIDs []clustering.ClusterID) ([]*analysis.Cluster, error) {
clusters, ok := f.clustersByProject[project]
if !ok {
return nil, analysis.ProjectNotExistsErr
}
var results []*analysis.Cluster
for _, c := range clusters {
include := false
for _, ci := range clusterIDs {
if ci == c.ClusterID {
include = true
}
}
if include {
results = append(results, c)
}
}
return results, nil
}
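// QueryClusterSummaries returns the fake cluster summaries for the project,
// validating the failure filter and order by clauses and panicking if the
// queried realms do not match expectedRealmsQueried.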
func (f *fakeAnalysisClient) QueryClusterSummaries(ctx context.Context, project string, options *analysis.QueryClusterSummariesOptions) ([]*analysis.ClusterSummary, error) {
clusters, ok := f.clusterMetricsByProject[project]
if !ok {
return nil, analysis.ProjectNotExistsErr
}
set := stringset.NewFromSlice(options.Realms...)
if set.Len() != len(f.expectedRealmsQueried) || !set.HasAll(f.expectedRealmsQueried...) {
panic("realms passed to QueryClusterSummaries do not match expected")
}
_, _, err := analysis.ClusteredFailuresTable.WhereClause(options.FailureFilter, "w_")
if err != nil {
return nil, analysis.InvalidArgumentTag.Apply(errors.Annotate(err, "failure_filter").Err())
}
_, err = analysis.ClusterSummariesTable.OrderByClause(options.OrderBy)
if err != nil {
return nil, analysis.InvalidArgumentTag.Apply(errors.Annotate(err, "order_by").Err())
}
var results []*analysis.ClusterSummary
for _, c := range clusters {
results = append(results, c)
}
return results, nil
}
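// ReadClusterFailures returns the fake failures for the requested cluster,
// panicking if the queried realms do not match expectedRealmsQueried.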
func (f *fakeAnalysisClient) ReadClusterFailures(ctx context.Context, options analysis.ReadClusterFailuresOptions) ([]*analysis.ClusterFailure, error) {
failuresByCluster, ok := f.failuresByProjectAndCluster[options.Project]
if !ok {
return nil, analysis.ProjectNotExistsErr
}
set := stringset.NewFromSlice(options.Realms...)
if set.Len() != len(f.expectedRealmsQueried) || !set.HasAll(f.expectedRealmsQueried...) {
panic("realms passed to ReadClusterFailures do not match expected")
}
return failuresByCluster[options.ClusterID], nil
}
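// ReadClusterExoneratedTestVariants returns the fake exonerated test variants
// for the requested cluster, panicking if the queried realms do not match
// expectedRealmsQueried.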
func (f *fakeAnalysisClient) ReadClusterExoneratedTestVariants(ctx context.Context, options analysis.ReadClusterExoneratedTestVariantsOptions) ([]*analysis.ExoneratedTestVariant, error) {
exoneratedTVsByCluster, ok := f.exoneratedTVsByProjectAndCluster[options.Project]
if !ok {
return nil, analysis.ProjectNotExistsErr
}
set := stringset.NewFromSlice(options.Realms...)
if set.Len() != len(f.expectedRealmsQueried) || !set.HasAll(f.expectedRealmsQueried...) {
panic("realms passed to ReadClusterFailures do not match expected")
}
return exoneratedTVsByCluster[options.ClusterID], nil
}