Import changes for goma server
- fc28b776ba28f56117aae335f7bef710da45c72b remoteexec: fix TestUploadInputFiles
- c327a441b0510fd4f623f09050d01f59987db0dd use go 1.13.7
- b0293a342ce0f3acc02c9862b740b565ad4e082d roll github.com/golang/protobuf v1.3.2 to v1.3.3
- c6cbbe89b235735691b6e962484c6b965b5b1600 merkletree: remove .keep_me hack
- 8382880bd530582d3d992dab035986930568d6c5 roll google.golang.org/grpc v1.26.0 to v1.27.0
- 726253c4d73965be922bf4eda1b1a06a40e5143b remoteexec: Refactor uploadInputFiles
- 942f049ddbd7ef23af1c87edf7e1c816eab6d470 remoteexec: uploadInputFiles returns errors
- 0e97e016329d602d9f4395a9ed89fd2bbf53c65e remoteexec: upload files to file-server in batches
- a1ba3f8a84a39e4bf8167e247cea6174b14795f3 remoteexec: gomaInput supports multiple blob upload
- 66a80acaab9b5fdc2a44a67f1427e9667e8d82c9 remoteexec: Reduce message of successful inputFiles
- 2a4b7380727fe086c34481bd23290d5b3e3285fc use go 1.13.6
- 7856755c29d0dc76b9a789ed461a7a791bb7e206 remoteexec: Simplify result creation in inputFiles()
- 55dfe14632e34f402875b5ec4026441cb7a155a9 exec_server: file-lookup-concurrency defaults to 20
- c7f1560b5f7c94159db6c30cf6ef2c22907048fe roll cloud.google.com/go from v0.51.0 to v0.52.0
- 144c70591ee7f3692be709d03d73bfea4fa18aad remoteexec/cas: Check BatchUpdateBlobsRequest proto size
- bc8fe0340ac23c837b418375baf4dc1332abd111 exec_server: Reduce cas file lookup concurrency 100 -> 20
- a19839bc86fef4aea7bc97b6970926f44c0f2d49 remoteexec/cas: Fix typo in cas_test.go
- e9d616740893349e6f937861cd1442bc00666ff2 remoteexec/cas: Limit CAS batch updates to 1000
- 60e5ee5e8155de5e9adb073611685b004223daf8 Allow the audience string stored in the git repository.
- 0d349f7415cf3f2de74a423c5f3e0b4de3ab1b3b merkletree: duplicate symlinks/dirs are not error
- d812400ff13d45e07b921abe508ff4ea3c424e86 roll contrib.go.opencensus.io/exporter/stackdriver v0.12....
- 372222e44e945b700fb584b44bc59f731b14b0a5 remoteexec: number of requests per wrapper types
- 2d381dbf4c4bbb2d4e94cbc8e2f120b15981b0f4 remoteexec: Adapter-wide sema for CAS lookupBlob
- 7036cf5ad607cefa6bcd278bdd2c6ae35d3d483d remoteexec: Create FileLookupSema in Adapter
- bff170368f7e2657f502e5015692698a616c359f remoteexec/cas: Concurrent blob lookup in file store
- 7c632fe404f065740bc6c0924cfe9a8554e0e3c9 remoteexec/cas: Use RBE's fakes.CAS for testing
- 70f2c93d85b296c3b6e8b7e096061d19f7207899 remoteexec: Support javac cwd agnostic check
- 7f14b767f9c42e68d47e9274ac489048f111e433 remoteexec/cas: Pass struct ptr to struct functions
- 0f94a6b3421da5d6a38640e85d7b259fbd7c159c remoteexec: Log CAS errors and return early
- 6b537728242f687d779a921def88c3d773bd4636 remoteexec/cas: helper funcs for blob lookup and batch re...
- c59b1b3664c2a31c79711fc0c856070070399828 roll cloud.google.com/go/storage v1.4.0 to v1.5.0
- 6709ea49a3e6d0291a4265c895367fce513f6707 remoteexec/cas: Explicitly separate small/large blobs
- 2c830147f02e4d45839ce6c4a9e36db3789bb577 remoteexec/cas: test CAS.Upload()'s bytestream upload
- ae5b7f95c67047a429e9c07bc3106ac3b0461b61 remoteexec/cas: Add unit test for CAS.Upload()
- 9b68e7167f149557163c0065312699c684b2462e remoteexec/cas: Add unit test for CAS.Missing()
- 317c004525e555ae24ec59834e6ca544df42f2f2 server: drop trace fraction and qps
- d17057ac9a59dde2e82480eeb03d17e762533bcf Reland "Reland "remoteexec: don't use sibling docker""
- 3a11774b46ccc6c3d257740636aff632e6f9a4a8 Revert "Reland "remoteexec: don't use sibling docker""
- 35ae2c4ac6887957307dfa81cec0e75832abae6a roll cloud.google.com/go v0.50.0 to v0.51.0
- 30d950518d05977c41c8f0d1afed15d30248c2fd Reland "remoteexec: don't use sibling docker"
- ee5243d8531cde95846e659287696b702a46be56 server: default trace fraction down from 1.0 to 0.5
- c6a98a4d29879a9accb756d29ebb1939d66ccb8e normalizer: fix for armv7a-cros-linux-gnueabihf
- b978c1c4087b012b48c479662d94d1df4fc8e3cb Revert "remoteexec: don't use sibling docker"
- 3b20f501fd195694f4108f63ce2c1681502d4968 Revert "remoteexec: fix nsjail option"
- a997ba139d9cac1b90b794dd93672f605f3d0b33 Revert "remoteexec: fix nsjail"
- 852897bf7d1b7994f8846aeaaf24d006d16abb81 remoteexec: fix nsjail
- 2a6bc4a4acc88175eb6eb6222abc14f09abb58df remoteexec: fix nsjail option
- 994b7a4f97707a280c672fa8544ee6879112032b remoteexec: don't use sibling docker
- 257f9c9c1cab29d6cad7e3401878197dbb57ee53 roll cloud.google.com/go v0.49.0 to v0.50.0
- 0e5482a738df170bda7df8fbfcb6e2aa433d31d7 roll google.golang.org/grpc v1.25.1 to v1.26.0
- a13936c9478838f6766a0ddc4fae7a212f32c7ad Revert "remoteexec: don't use sibling docker"
- 17aff5222ce72a5776bde4064cd3d34e7652905d remoteexec: don't use sibling docker
- 727ffd963e02aee0584c73b27cc8171e413416f2 toolchain.config managed in k8s configmap
- 8cc8f1910fb0ea38e0bc94e1d891be4bb8aed378 remoteexec: introduce wrapperType
- 4f96a9dd2f867760d7d920b5d655f7297c7e63f9 remoteexec: Move OpenCensus stats to stats.go
- b459260bd395a891db9d27d5140bed59feddaf50 use go 1.13.5
- 6959d4be1f0147defa862bfd36a5ed76eb4e1a65 Revert "command: pubsub-error uses last value"
- 854a0bf7465499ff814027b61a321ecc523dd1a7 use trace limited sampler for all servers
- ca0b8aefe7411371a79938ea634b232c62674898 Revert "use remote sampler for all servers"
- 8dcbc3283bb16ebea3f9eb0e97eb6ccc18dc99fa Support -fdebug-compilation-dir when checking CWD agnostic
GitOrigin-RevId: fc28b776ba28f56117aae335f7bef710da45c72b
Change-Id: I631bb5122e4647e9f9dcf52830cfb0e3cd248bf1
TBR=yyanagisawa@google.com, sque@google.com, yekuang@google.com
diff --git a/.gitallowed b/.gitallowed
new file mode 100644
index 0000000..d149be0
--- /dev/null
+++ b/.gitallowed
@@ -0,0 +1 @@
+687418631491-r6m1c3pr0lth5atp4ie07f03ae8omefc.apps.googleusercontent.com
diff --git a/cipd_manifest.txt b/cipd_manifest.txt
index 2da9588..613b227 100644
--- a/cipd_manifest.txt
+++ b/cipd_manifest.txt
@@ -13,7 +13,7 @@
# https://chrome-infra-packages.appspot.com/
# go
-infra/go/${platform} version:1.13.4
+infra/3pp/tools/go/${platform} version:1.13.7
# protoc
# If the version you want is missing, please follow the instruction in:
diff --git a/cipd_manifest.versions b/cipd_manifest.versions
index b3275a7..e72fe3a 100644
--- a/cipd_manifest.versions
+++ b/cipd_manifest.versions
@@ -1,9 +1,9 @@
# This file is auto-generated by 'cipd ensure-file-resolve'.
# Do not modify manually. All changes will be overwritten.
-infra/go/linux-amd64
- version:1.13.4
- 2Z8aYCopalIjDFp9SV3XApZaSKi3gBdgi_XYr6k7cKYC
+infra/3pp/tools/go/linux-amd64
+ version:1.13.7
+ BSP3G-zh4nYyRapTOCGWJvMD1O9I7NwkBnxBnsJStNUC
infra/tools/protoc/linux-amd64
protobuf_version:v3.7.0
diff --git a/cmd/auth_server/main.go b/cmd/auth_server/main.go
index 5042841..7c70a50 100644
--- a/cmd/auth_server/main.go
+++ b/cmd/auth_server/main.go
@@ -133,7 +133,7 @@
logger.Fatal(err)
}
trace.ApplyConfig(trace.Config{
- DefaultSampler: server.NewRemoteSampler(true, trace.NeverSample()),
+ DefaultSampler: server.NewLimitedSampler(server.DefaultTraceFraction, server.DefaultTraceQPS),
})
s, err := server.NewGRPC(*port)
diff --git a/cmd/exec_server/main.go b/cmd/exec_server/main.go
index 07a19d8..906aae8 100644
--- a/cmd/exec_server/main.go
+++ b/cmd/exec_server/main.go
@@ -52,11 +52,13 @@
)
var (
- port = flag.Int("port", 5050, "rpc port")
- mport = flag.Int("mport", 8081, "monitor port")
- fileAddr = flag.String("file-addr", "passthrough:///file-server:5050", "file server address")
- configMapURI = flag.String("configmap_uri", "", "configmap uri. e.g. gs://$project-toolchain-config/$name.config, text proto of command.ConfigMap.")
- configMap = flag.String("configmap", "", "configmap text proto")
+ port = flag.Int("port", 5050, "rpc port")
+ mport = flag.Int("mport", 8081, "monitor port")
+ fileAddr = flag.String("file-addr", "passthrough:///file-server:5050", "file server address")
+ configMapURI = flag.String("configmap_uri", "", "deprecated: configmap uri. e.g. gs://$project-toolchain-config/$name.config, text proto of command.ConfigMap.")
+ configMap = flag.String("configmap", "", "configmap text proto")
+ toolchainConfigBucket = flag.String("toolchain-config-bucket", "", "cloud storage bucket for toolchain config")
+ configMapFile = flag.String("configmap_file", "", "filename for configmap text proto")
traceProjectID = flag.String("trace-project-id", "", "project id for cloud tracing")
pubsubProjectID = flag.String("pubsub-project-id", "", "project id for pubsub")
@@ -67,7 +69,7 @@
cmdFilesBucket = flag.String("cmd-files-bucket", "", "cloud storage bucket for command binary files")
// Needed for b/120582303, but will be deprecated by b/80508682.
- fileLookupConcurrency = flag.Int("file-lookup-concurrency", 5, "concurrency to look up files from file-server")
+ fileLookupConcurrency = flag.Int("file-lookup-concurrency", 20, "concurrency to look up files from file-server")
)
var (
@@ -166,7 +168,7 @@
cancel func()
}
-func newConfigServer(ctx context.Context, inventory *exec.Inventory, uri string, gsclient *storage.Client, opts ...option.ClientOption) (*configServer, error) {
+func newConfigServer(ctx context.Context, inventory *exec.Inventory, bucket, configMapFile string, cm *cmdpb.ConfigMap, gsclient *storage.Client, opts ...option.ClientOption) (*configServer, error) {
cs := &configServer{
inventory: inventory,
}
@@ -182,7 +184,9 @@
return nil, fmt.Errorf("pubsub client failed: %v", err)
}
cs.configmap = command.ConfigMapBucket{
- URI: uri,
+ URI: fmt.Sprintf("gs://%s/", bucket),
+ ConfigMap: cm,
+ ConfigMapFile: configMapFile,
StorageClient: stiface.AdaptClient(gsclient),
PubsubClient: cs.psclient,
SubscriberID: fmt.Sprintf("toolchain-config-%s-%s", server.ClusterName(ctx), server.HostName(ctx)),
@@ -270,8 +274,8 @@
logger := log.FromContext(ctx)
defer logger.Sync()
- if *configMapURI == "" && *configMap == "" {
- logger.Fatalf("--configmap_uri or --configmap must be given")
+ if (*toolchainConfigBucket == "" || *configMapFile == "") && *configMap == "" {
+ logger.Fatalf("--toolchain-config-bucket,--configmap_file or --configmap must be given")
}
if *remoteexecAddr == "" {
logger.Fatalf("--remoteexec-addr must be given")
@@ -299,7 +303,7 @@
logger.Fatal(err)
}
trace.ApplyConfig(trace.Config{
- DefaultSampler: server.NewRemoteSampler(true, trace.NeverSample()),
+ DefaultSampler: server.NewLimitedSampler(server.DefaultTraceFraction, server.DefaultTraceQPS),
})
s, err := server.NewGRPC(*port,
@@ -317,8 +321,8 @@
var gsclient *storage.Client
var opts []option.ClientOption
- if *configMapURI != "" || *cmdFilesBucket != "" {
- logger.Infof("configmap_uri or cmd-files-bucket is specified. use cloud storage")
+ if *toolchainConfigBucket != "" || *cmdFilesBucket != "" {
+ logger.Infof("toolchain-config-bucket or cmd-files-bucket is specified. use cloud storage")
if *serviceAccountFile != "" {
opts = append(opts, option.WithServiceAccountFile(*serviceAccountFile))
}
@@ -344,6 +348,10 @@
logger.Fatalf("--remote-instance-prefix must be given for remoteexec API")
}
+ if *fileLookupConcurrency == 0 {
+ *fileLookupConcurrency = 1
+ }
+ casBlobLookupConcurrency := 20
re := &remoteexec.Adapter{
InstancePrefix: *remoteInstancePrefix,
ExecTimeout: 15 * time.Minute,
@@ -356,7 +364,8 @@
ToolName: "goma/exec-server",
ToolVersion: "0.0.0-experimental",
},
- FileLookupConcurrency: *fileLookupConcurrency,
+ FileLookupSema: make(chan struct{}, *fileLookupConcurrency),
+ CASBlobLookupSema: make(chan struct{}, casBlobLookupConcurrency),
}
if *cmdFilesBucket == "" {
@@ -400,8 +409,16 @@
}()
confServer = nullServer{ch: make(chan error)}
- case *configMapURI != "":
- cs, err := newConfigServer(ctx, inventory, *configMapURI, gsclient, opts...)
+ case *toolchainConfigBucket != "":
+ cm := &cmdpb.ConfigMap{}
+ if *configMap != "" {
+ err := proto.UnmarshalText(*configMap, cm)
+ if err != nil {
+ ready <- fmt.Errorf("parse configmap %q: %v", *configMap, err)
+ return
+ }
+ }
+ cs, err := newConfigServer(ctx, inventory, *toolchainConfigBucket, *configMapFile, cm, gsclient, opts...)
if err != nil {
logger.Fatalf("configServer: %v", err)
}
diff --git a/cmd/execlog_server/main.go b/cmd/execlog_server/main.go
index 536f763..1383ee7 100644
--- a/cmd/execlog_server/main.go
+++ b/cmd/execlog_server/main.go
@@ -46,7 +46,7 @@
logger.Fatal(err)
}
trace.ApplyConfig(trace.Config{
- DefaultSampler: server.NewRemoteSampler(true, trace.NeverSample()),
+ DefaultSampler: server.NewLimitedSampler(server.DefaultTraceFraction, server.DefaultTraceQPS),
})
s, err := server.NewGRPC(*port,
diff --git a/cmd/file_server/main.go b/cmd/file_server/main.go
index 6f93419..e04ad03 100644
--- a/cmd/file_server/main.go
+++ b/cmd/file_server/main.go
@@ -38,7 +38,8 @@
cacheAddr = flag.String("file-cache-addr", "", "cache server address")
bucket = flag.String("bucket", "", "backing store bucket")
- traceProjectID = flag.String("trace-project-id", "", "project id for cloud tracing")
+ traceProjectID = flag.String("trace-project-id", "", "project id for cloud tracing")
+
serviceAccountFile = flag.String("service-account-file", "", "service account json file")
)
@@ -74,9 +75,8 @@
if err != nil {
logger.Fatal(err)
}
-
trace.ApplyConfig(trace.Config{
- DefaultSampler: server.NewRemoteSampler(true, trace.NeverSample()),
+ DefaultSampler: server.NewLimitedSampler(server.DefaultTraceFraction, server.DefaultTraceQPS),
})
s, err := server.NewGRPC(*port,
diff --git a/cmd/frontend/main.go b/cmd/frontend/main.go
index b55a84a..c02a5fb 100644
--- a/cmd/frontend/main.go
+++ b/cmd/frontend/main.go
@@ -59,10 +59,6 @@
traceProjectID = flag.String("trace-project-id", "", "project id for cloud tracing")
serviceAccountFile = flag.String("service-account-file", "", "service account json file")
- traceFraction = flag.Float64("trace-sampling-fraction", 1.0, "sampling fraction for stackdriver trace")
- // trace API limit is 4800/minutes.
- // 4800/60/(total number of frontend replicas in the project)
- traceQPS = flag.Float64("trace-sampling-qps-limit", 0.2, "sampling qps limit for stackdrvier trace")
memoryMargin = flag.String("memory-margin",
k8sapi.NewQuantity(maxMsgSize, k8sapi.BinarySI).String(),
@@ -136,7 +132,7 @@
logger.Fatal(err)
}
trace.ApplyConfig(trace.Config{
- DefaultSampler: server.NewLimitedSampler(*traceFraction, *traceQPS),
+ DefaultSampler: server.NewLimitedSampler(server.DefaultTraceFraction, server.DefaultTraceQPS),
})
s, err := server.NewGRPC(*gport,
diff --git a/cmd/remoteexec_proxy/main.go b/cmd/remoteexec_proxy/main.go
index de14351..9e19c6a 100644
--- a/cmd/remoteexec_proxy/main.go
+++ b/cmd/remoteexec_proxy/main.go
@@ -385,7 +385,8 @@
ToolName: "remoteexec_proxy",
ToolVersion: "0.0.0-experimental",
},
- FileLookupConcurrency: 2,
+ FileLookupSema: make(chan struct{}, 2),
+ CASBlobLookupSema: make(chan struct{}, 20),
}
configResp := &cmdpb.ConfigResp{
diff --git a/command/configmap.go b/command/configmap.go
index cd86678..695e446 100644
--- a/command/configmap.go
+++ b/command/configmap.go
@@ -9,6 +9,7 @@
"context"
"errors"
"fmt"
+ "io/ioutil"
"math/rand"
"path"
"sort"
@@ -40,7 +41,7 @@
{
Description: "configmap pubsub error",
Measure: pubsubErrors,
- Aggregation: view.LastValue(),
+ Aggregation: view.Count(),
},
}
)
@@ -51,9 +52,6 @@
//
// if seq is updated from last load, it will load CmdDescriptor
// from <bucket>/<runtime>/<prebuilt_item>/descriptors/<descriptorHash>.
-//
-// also loads platform properties for remoteexec API
-// from <bucket>/<runtime>/remoteexec-platform/<property-name>.
type ConfigMapLoader struct {
ConfigMap ConfigMap
ConfigLoader ConfigLoader
@@ -89,23 +87,21 @@
// <bucket> is <project>-toolchain-config.
// in the <bucket>
//
-// <config>.config: text proto ConfigMap
-//
// <runtime>/
// seq: text, sequence number.
// <prebuilt-item>/descriptors/<descriptorHash>: proto CmdDescriptor
-// remoteexec-platform/<property-name>: text, property-value
-//
-// <bucket> might have several <config>.config, and each config might
-// have different set of runtime etc.
//
// Watcher watches */seq files via default notification topic on the bucket.
-// Seqs and RuntimeConfigs will read <config>.config everytime.
+// Seqs and RuntimeConfigs will read ConfigMapFile everytime.
type ConfigMapBucket struct {
// URI of config data.
- // gs://<bucket>/<config>.config
- // e.g. gs://$project-toolchain-config/$name.config
- URI string
+ // gs://<bucket>/
+ // e.g. gs://$project-toolchain-config/
+ URI string
+
+ ConfigMap *cmdpb.ConfigMap
+ ConfigMapFile string
+
PubsubClient *pubsub.Client
// StorageClient is an interface for accessing Cloud Storage. It can
@@ -226,20 +222,18 @@
}
func (c ConfigMapBucket) configMap(ctx context.Context) (*cmdpb.ConfigMap, error) {
- bucket, obj, err := splitGCSPath(c.URI)
+ if c.ConfigMapFile == "" {
+ return proto.Clone(c.ConfigMap).(*cmdpb.ConfigMap), nil
+ }
+ buf, err := ioutil.ReadFile(c.ConfigMapFile)
if err != nil {
return nil, err
}
- buf, err := storageReadAll(ctx, c.StorageClient, bucket, obj)
+ err = proto.UnmarshalText(string(buf), c.ConfigMap)
if err != nil {
return nil, err
}
- cm := &cmdpb.ConfigMap{}
- err = proto.UnmarshalText(string(buf), cm)
- if err != nil {
- return nil, fmt.Errorf("parse %s: %v", c.URI, err)
- }
- return cm, nil
+ return proto.Clone(c.ConfigMap).(*cmdpb.ConfigMap), nil
}
func cloudStorageNotification(ctx context.Context, s stiface.Client, bucket string) (*storage.Notification, error) {
@@ -331,6 +325,7 @@
}
}
ctx, cancel := context.WithCancel(context.Background())
+ // TODO: watch configMapFile.
w := configMapBucketWatcher{
s: subscription,
cancel: cancel,
@@ -496,11 +491,13 @@
// Load loads toolchain config from <uri>.
// It sets rc.ServiceAddr as target addr.
func (c *ConfigLoader) Load(ctx context.Context, uri string, rc *cmdpb.RuntimeConfig) ([]*cmdpb.Config, error) {
- platform, err := loadRemoteexecPlatform(ctx, c.StorageClient, uri)
- if err != nil {
- return nil, err
+ platform := &cmdpb.RemoteexecPlatform{}
+ for _, p := range rc.Platform.GetProperties() {
+ platform.Properties = append(platform.Properties, &cmdpb.RemoteexecPlatform_Property{
+ Name: p.Name,
+ Value: p.Value,
+ })
}
- mergePlatformProperties(platform, rc.Platform)
platform.HasNsjail = rc.GetPlatformRuntimeConfig().GetHasNsjail()
confs, err := loadConfigs(ctx, c.StorageClient, uri, rc, platform)
@@ -625,46 +622,6 @@
return d, nil
}
-func loadRemoteexecPlatform(ctx context.Context, client stiface.Client, uri string) (*cmdpb.RemoteexecPlatform, error) {
- logger := log.FromContext(ctx)
- bucket, obj, err := splitGCSPath(uri)
- if err != nil {
- return nil, err
- }
- obj = path.Join(obj, "remoteexec-platform")
-
- bkt := client.Bucket(bucket)
- if bkt == nil {
- return nil, fmt.Errorf("could not find storage bucket %s", bucket)
- }
- iter := bkt.Objects(ctx, &storage.Query{
- Prefix: obj,
- })
-
- // pagination?
- platform := &cmdpb.RemoteexecPlatform{}
- logger.Infof("load remoteexec-platform from %s", bucket)
- for {
- attr, err := iter.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- return nil, fmt.Errorf("iter %s/%s: %v", bucket, obj, err)
- }
- buf, err := storageReadAll(ctx, client, bucket, attr.Name)
- if err != nil {
- return nil, fmt.Errorf("load %s: %v", attr.Name, err)
- }
- platform.Properties = append(platform.Properties, &cmdpb.RemoteexecPlatform_Property{
- Name: path.Base(attr.Name),
- Value: strings.TrimSpace(string(buf)),
- })
- }
- logger.Infof("loaded remoteexec-platform from %s: %s", bucket, platform)
- return platform, nil
-}
-
func checkPrebuilt(rc *cmdpb.RuntimeConfig, objName string) error {
// objName will be <runtime>/<prebuilts>/descriptors/<hash>
i := strings.Index(objName, "/descriptors")
diff --git a/command/normalizer/normalizer.go b/command/normalizer/normalizer.go
index aa03931..5af46e9 100644
--- a/command/normalizer/normalizer.go
+++ b/command/normalizer/normalizer.go
@@ -37,7 +37,7 @@
}
i := len(tokens) - 1
switch tokens[i] {
- case "eabi", "gnu", "gnueabi", "macho", "android", "androideabi", "uclibc", "msvc":
+ case "eabi", "gnu", "gnueabi", "gnueabihf", "macho", "android", "androideabi", "uclibc", "msvc":
env = tokens[i]
i--
}
diff --git a/command/normalizer/normalizer_test.go b/command/normalizer/normalizer_test.go
index 98a665c..058ca57 100644
--- a/command/normalizer/normalizer_test.go
+++ b/command/normalizer/normalizer_test.go
@@ -56,6 +56,16 @@
},
},
{
+ input: "armv7a-cros-linux-gnueabihf",
+ want: target{
+ arch: "armv7a",
+ archType: "armv7a",
+ vendor: "cros",
+ os: "linux",
+ env: "gnueabihf",
+ },
+ },
+ {
input: "i486-linux-gnu",
want: target{
arch: "i486",
diff --git a/go.mod b/go.mod
index 9383ed5..4084ef5 100644
--- a/go.mod
+++ b/go.mod
@@ -3,28 +3,28 @@
go 1.12
require (
- cloud.google.com/go v0.49.0
+ cloud.google.com/go v0.52.0
cloud.google.com/go/pubsub v1.1.0
- cloud.google.com/go/storage v1.4.0
- contrib.go.opencensus.io/exporter/stackdriver v0.12.8
- github.com/bazelbuild/remote-apis v0.0.0-20190606163526-a5c577357528
+ cloud.google.com/go/storage v1.5.0
+ contrib.go.opencensus.io/exporter/stackdriver v0.12.9
+ github.com/bazelbuild/remote-apis v0.0.0-20191104140458-e77c4eb2ca48
+ github.com/bazelbuild/remote-apis-sdks v0.0.0-20200117155253-d02017f96d3b
github.com/fsnotify/fsnotify v1.4.7
- github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
- github.com/golang/protobuf v1.3.2
+ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7
+ github.com/golang/protobuf v1.3.3
github.com/gomodule/redigo v2.0.0+incompatible
- github.com/google/go-cmp v0.3.1
- github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect
+ github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.1.1
github.com/googleapis/gax-go/v2 v2.0.5
github.com/googleapis/google-cloud-go-testing v0.0.0-20190904031503-2d24dde44ba5
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
go.opencensus.io v0.22.2
go.uber.org/zap v1.10.0
- golang.org/x/build v0.0.0-20190830211429-e21f1db94c01
- golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2
- golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+ golang.org/x/build v0.0.0-20191031202223-0706ea4fce0c
+ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa
+ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
- google.golang.org/api v0.14.0
- google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9
- google.golang.org/grpc v1.25.1
+ google.golang.org/api v0.15.0
+ google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba
+ google.golang.org/grpc v1.27.0
)
diff --git a/go.sum b/go.sum
index 8750e77..f9b193a 100644
--- a/go.sum
+++ b/go.sum
@@ -10,10 +10,14 @@
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.49.0 h1:CH+lkubJzcPYB1Ggupcq0+k8Ni2ILdG2lYjDIgavDBQ=
-cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY=
+cloud.google.com/go v0.47.0/go.mod h1:5p3Ky/7f3N10VBkhuR5LFtddroTiMyjZV/Kj5qOQFxU=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=
@@ -22,40 +26,72 @@
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.4.0 h1:KDdqY5VTXBTqpSbctVTt0mVvfanP6JZzNzLE0qNY100=
-cloud.google.com/go/storage v1.4.0/go.mod h1:ZusYJWlOshgSBGbt6K3GnB3MT3H1xs2id9+TCl4fDBA=
-contrib.go.opencensus.io/exporter/stackdriver v0.12.8 h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=
-contrib.go.opencensus.io/exporter/stackdriver v0.12.8/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
+cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+contrib.go.opencensus.io/exporter/stackdriver v0.12.9 h1:ZRVpDigsb+nVI/yps/NLDOYzYjFFmm3OCsBhmYocxR0=
+contrib.go.opencensus.io/exporter/stackdriver v0.12.9/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes=
+github.com/aclements/go-moremath v0.0.0-20190830160640-d16893ddf098/go.mod h1:idZL3yvz4kzx1dsBOAC+oYv6L92P1oFEhUXUB1A/lwQ=
+github.com/ajstarks/deck v0.0.0-20191009173945-82d717002242/go.mod h1:j3f/59diR4DorW5A78eDYvRkdrkh+nps4p5LA1Tl05U=
+github.com/ajstarks/svgo v0.0.0-20190826172357-de52242f3d65/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/aws/aws-sdk-go v1.23.20 h1:2CBuL21P0yKdZN5urf2NxKa1ha8fhnY+A3pBCHFeZoA=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/bazelbuild/remote-apis v0.0.0-20190606163526-a5c577357528 h1:/+plLAl5S7giewLlFr2GlBm1AwPcirP4QnUssKsA7KQ=
-github.com/bazelbuild/remote-apis v0.0.0-20190606163526-a5c577357528/go.mod h1:9Y+1FnaNUGVV6wKE0Jdh+mguqDUsyd9uUqokalrC7DQ=
+github.com/bazelbuild/remote-apis v0.0.0-20191104140458-e77c4eb2ca48 h1:bgj+Oufa8F4rCHe/8omhml7cBlg3VmNhF66ed1vT2Bw=
+github.com/bazelbuild/remote-apis v0.0.0-20191104140458-e77c4eb2ca48/go.mod h1:9Y+1FnaNUGVV6wKE0Jdh+mguqDUsyd9uUqokalrC7DQ=
+github.com/bazelbuild/remote-apis-sdks v0.0.0-20200117155253-d02017f96d3b h1:Vy+DZZyPUNv2Ki15vJ+JdJRPg4LzJm52i8//9pfqPEU=
+github.com/bazelbuild/remote-apis-sdks v0.0.0-20200117155253-d02017f96d3b/go.mod h1:sAMybttCdA6S0dbSG/xluhJY6D3qlEkAkgfcyOyRee4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cznic/cc v0.0.0-20181122101902-d673e9b70d4d/go.mod h1:m3fD/V+XTB35Kh9zw6dzjMY+We0Q7PMf6LLIC4vuG9k=
+github.com/cznic/fileutil v0.0.0-20181122101858-4d67cfea8c87/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
+github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
+github.com/cznic/internal v0.0.0-20181122101858-3279554c546e/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4=
+github.com/cznic/ir v0.0.0-20181122101859-da7ba2ecce8b/go.mod h1:bctvsSxTD8Lpaj5RRQ0OrAAu4+0mD4KognDQItBNMn0=
+github.com/cznic/lex v0.0.0-20181122101858-ce0fb5e9bb1b/go.mod h1:LcYbbl1tn/c31gGxe2EOWyzr7EaBcdQOoIVGvJMc7Dc=
+github.com/cznic/lexer v0.0.0-20181122101858-e884d4bd112e/go.mod h1:YNGh5qsZlhFHDfWBp/3DrJ37Uy4pRqlwxtL+LS7a/Qw=
+github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
+github.com/cznic/xc v0.0.0-20181122101856-45b06973881e/go.mod h1:3oFoiOvCDBYH+swwf5+k/woVmWy7h1Fcyu8Qig/jjX0=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/disintegration/gift v1.2.1/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -66,8 +102,16 @@
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
+github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
+github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
+github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=
+github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
@@ -76,6 +120,10 @@
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.2-0.20191028172631-481baca67f93 h1:VvBteXw2zOXEgm0o3PgONTWf+bhUGsCaiNn3pbkU9LA=
+github.com/google/go-cmp v0.3.2-0.20191028172631-481baca67f93/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
@@ -84,11 +132,15 @@
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 h1:XTnP8fJpa4Kvpw2qARB4KS9izqxPS0Sd92cDlY3uk+w=
-github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191028172815-5e965273ee43/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
+github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
@@ -102,11 +154,17 @@
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.13.0/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -117,23 +175,38 @@
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -150,22 +223,30 @@
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
-golang.org/x/build v0.0.0-20190830211429-e21f1db94c01 h1:j3Fv9IbIXFm9ao/xY0fMEp+cX74cZ6f/7UWv8fPJ69s=
-golang.org/x/build v0.0.0-20190830211429-e21f1db94c01/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM=
+golang.org/x/build v0.0.0-20191031202223-0706ea4fce0c h1:jjNoDZTS0vmbqBhqD5MPXauZW+kcGyflfDDFBNCPSVI=
+golang.org/x/build v0.0.0-20191031202223-0706ea4fce0c/go.mod h1:Nl5grlQor/lxfX9FfGLe+g2cVSCiURG36KQgsg/ODs4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -175,12 +256,17 @@
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mobile v0.0.0-20191031020345-0945064e013a/go.mod h1:p895TfNkDgPEmEQrNiOtIl3j98d/tGU95djDj7NfyjQ=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -191,14 +277,24 @@
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 h1:4dVFTC832rPn4pomLSz1vA+are2+dU19w1H8OngV7nc=
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -210,6 +306,7 @@
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -221,8 +318,15 @@
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8 h1:41hwlulw1prEMBxLQSlMSux1zxJf07B3WPsdjJlKZxE=
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191104094858-e8c54fb511f6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 h1:JA8d3MPx/IToSyXZG/RhwYEtfrKO1Fxrqe8KrkiLXKM=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -230,6 +334,7 @@
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -245,11 +350,24 @@
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190909214602-067311248421/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191010171213-8abd42400456/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -258,8 +376,11 @@
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ=
google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -268,6 +389,8 @@
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -282,8 +405,16 @@
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
@@ -293,14 +424,20 @@
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
diff --git a/proto/auth/auth_service.pb.go b/proto/auth/auth_service.pb.go
index 827165d..3adfe19 100644
--- a/proto/auth/auth_service.pb.go
+++ b/proto/auth/auth_service.pb.go
@@ -38,11 +38,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// AuthServiceClient is the client API for AuthService service.
//
@@ -52,10 +52,10 @@
}
type authServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewAuthServiceClient(cc *grpc.ClientConn) AuthServiceClient {
+func NewAuthServiceClient(cc grpc.ClientConnInterface) AuthServiceClient {
return &authServiceClient{cc}
}
diff --git a/proto/auth/authdb_service.pb.go b/proto/auth/authdb_service.pb.go
index 6cfc35b..30bb448 100644
--- a/proto/auth/authdb_service.pb.go
+++ b/proto/auth/authdb_service.pb.go
@@ -40,11 +40,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// AuthDBServiceClient is the client API for AuthDBService service.
//
@@ -54,10 +54,10 @@
}
type authDBServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewAuthDBServiceClient(cc *grpc.ClientConn) AuthDBServiceClient {
+func NewAuthDBServiceClient(cc grpc.ClientConnInterface) AuthDBServiceClient {
return &authDBServiceClient{cc}
}
diff --git a/proto/cache/cache_service.pb.go b/proto/cache/cache_service.pb.go
index e856ad6..5987fcb 100644
--- a/proto/cache/cache_service.pb.go
+++ b/proto/cache/cache_service.pb.go
@@ -39,11 +39,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// CacheServiceClient is the client API for CacheService service.
//
@@ -54,10 +54,10 @@
}
type cacheServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewCacheServiceClient(cc *grpc.ClientConn) CacheServiceClient {
+func NewCacheServiceClient(cc grpc.ClientConnInterface) CacheServiceClient {
return &cacheServiceClient{cc}
}
diff --git a/proto/command/command.pb.go b/proto/command/command.pb.go
index a77e38f..48447c6 100644
--- a/proto/command/command.pb.go
+++ b/proto/command/command.pb.go
@@ -587,6 +587,7 @@
// Basename of RBE instance to use. e.g. "default_instance" or "windows".
RbeInstanceBasename string `protobuf:"bytes,2,opt,name=rbe_instance_basename,json=rbeInstanceBasename,proto3" json:"rbe_instance_basename,omitempty"`
// Set true if nsjail is available in the platform image.
+	// TODO: deprecated. always requires nsjail on linux platform.
HasNsjail bool `protobuf:"varint,3,opt,name=has_nsjail,json=hasNsjail,proto3" json:"has_nsjail,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -860,7 +861,6 @@
// the following files will be detected in this name directory in the bucket:
// seq
// <prebuilt-item>/descriptors/<descriptorHash>
- // remoteexec-platform/<property-name>
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// service address for the runtime. i.e. RBE address.
ServiceAddr string `protobuf:"bytes,2,opt,name=service_addr,json=serviceAddr,proto3" json:"service_addr,omitempty"`
@@ -869,10 +869,8 @@
// This is selector to use this runtime. i.e. if client request contains
// the dimentions, this runtime config will be selected.
PlatformRuntimeConfig *PlatformRuntimeConfig `protobuf:"bytes,6,opt,name=platform_runtime_config,json=platformRuntimeConfig,proto3" json:"platform_runtime_config,omitempty"`
- // Platform is a set of requirements, such as haredware, operting system
+ // Platform is a set of requirements, such as hardware, operating system
// for RBE backend.
- // property files stored in remoteexec-platform/ in the bucket will be
- // merged into this.
Platform *Platform `protobuf:"bytes,8,opt,name=platform,proto3" json:"platform,omitempty"`
// prebuilts prefix to allow.
// if allowed_prebuilts specified, only prebuilts that are matched
diff --git a/proto/command/command.proto b/proto/command/command.proto
index bd2e2cd..3852a28 100644
--- a/proto/command/command.proto
+++ b/proto/command/command.proto
@@ -167,6 +167,7 @@
string rbe_instance_basename = 2;
// Set true if nsjail is available in the platform image.
+ // TODO: deprecated. always requires nsjail on linux platform.
bool has_nsjail = 3;
}
@@ -204,7 +205,6 @@
// the following files will be detected in this name directory in the bucket:
// seq
// <prebuilt-item>/descriptors/<descriptorHash>
- // remoteexec-platform/<property-name>
string name = 1;
// service address for the runtime. i.e. RBE address.
@@ -219,10 +219,8 @@
reserved 7;
reserved "rbe_instance_basename";
- // Platform is a set of requirements, such as haredware, operting system
+ // Platform is a set of requirements, such as hardware, operating system
// for RBE backend.
- // property files stored in remoteexec-platform/ in the bucket will be
- // merged into this.
Platform platform = 8;
// go/goma-toolchain-filter-per-cluster
diff --git a/proto/exec/exec_service.pb.go b/proto/exec/exec_service.pb.go
index cfa34f3..c1d507d 100644
--- a/proto/exec/exec_service.pb.go
+++ b/proto/exec/exec_service.pb.go
@@ -111,11 +111,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// ExecServiceClient is the client API for ExecService service.
//
@@ -125,10 +125,10 @@
}
type execServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewExecServiceClient(cc *grpc.ClientConn) ExecServiceClient {
+func NewExecServiceClient(cc grpc.ClientConnInterface) ExecServiceClient {
return &execServiceClient{cc}
}
diff --git a/proto/execlog/log_service.pb.go b/proto/execlog/log_service.pb.go
index 1384f2d..f10d5ac 100644
--- a/proto/execlog/log_service.pb.go
+++ b/proto/execlog/log_service.pb.go
@@ -42,11 +42,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// LogServiceClient is the client API for LogService service.
//
@@ -56,10 +56,10 @@
}
type logServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewLogServiceClient(cc *grpc.ClientConn) LogServiceClient {
+func NewLogServiceClient(cc grpc.ClientConnInterface) LogServiceClient {
return &logServiceClient{cc}
}
diff --git a/proto/file/file_service.pb.go b/proto/file/file_service.pb.go
index 990b420..b2b7902 100644
--- a/proto/file/file_service.pb.go
+++ b/proto/file/file_service.pb.go
@@ -43,11 +43,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// FileServiceClient is the client API for FileService service.
//
@@ -58,10 +58,10 @@
}
type fileServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewFileServiceClient(cc *grpc.ClientConn) FileServiceClient {
+func NewFileServiceClient(cc grpc.ClientConnInterface) FileServiceClient {
return &fileServiceClient{cc}
}
diff --git a/proto/settings/settings_service.pb.go b/proto/settings/settings_service.pb.go
index c1d5b39..38aa3f5 100644
--- a/proto/settings/settings_service.pb.go
+++ b/proto/settings/settings_service.pb.go
@@ -39,11 +39,11 @@
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
// SettingsServiceClient is the client API for SettingsService service.
//
@@ -53,10 +53,10 @@
}
type settingsServiceClient struct {
- cc *grpc.ClientConn
+ cc grpc.ClientConnInterface
}
-func NewSettingsServiceClient(cc *grpc.ClientConn) SettingsServiceClient {
+func NewSettingsServiceClient(cc grpc.ClientConnInterface) SettingsServiceClient {
return &settingsServiceClient{cc}
}
diff --git a/remoteexec/adapter.go b/remoteexec/adapter.go
index ec73df0..e7d39d3 100644
--- a/remoteexec/adapter.go
+++ b/remoteexec/adapter.go
@@ -66,10 +66,13 @@
// Tool details put in request metadata.
ToolDetails *rpb.ToolDetails
- // FileLookupConcurrency represents concurrency to look up file
+ // FileLookupSema specifies concurrency to look up file
// contents from file-cache-server to be converted to CAS.
- // 1 if not specified.
- FileLookupConcurrency int
+ FileLookupSema chan struct{}
+
+ // CASBlobLookupSema specifies concurrency to look up file blobs in cas.lookupBlobsInStore(),
+ // which calls Store.Get().
+ CASBlobLookupSema chan struct{}
capMu sync.Mutex
capabilities *rpb.ServerCapabilities
@@ -156,7 +159,7 @@
client.CallOptions = append(client.CallOptions,
grpc.PerRPCCredentials(oauth.NewOauthAccess(token)))
- maxBytes := int64(cas.DefaultBatchLimit)
+ maxBytes := int64(cas.DefaultBatchByteLimit)
if s := f.capabilities.GetCacheCapabilities().GetMaxBatchTotalSizeBytes(); s > maxBytes {
maxBytes = s
}
@@ -297,18 +300,26 @@
addTimestamp("check cache", time.Since(t))
if !cached {
t = time.Now()
- blobs := r.missingBlobs(ctx)
+ blobs, err := r.missingBlobs(ctx)
addTimestamp("check missing", time.Since(t))
+ if err != nil {
+ logger.Errorf("error in check missing blobs: %v", err)
+ return nil, err
+ }
+
t = time.Now()
- resp := r.uploadBlobs(ctx, blobs)
+ resp, err = r.uploadBlobs(ctx, blobs)
addTimestamp("upload blobs", time.Since(t))
+ if err != nil {
+ logger.Errorf("error in upload blobs: %v", err)
+ return nil, err
+ }
if resp != nil {
logger.Infof("fail fast for uploading missing blobs: %v", resp)
return resp, nil
}
t = time.Now()
- var err error
eresp, err = r.executeAction(ctx)
addTimestamp("execute", time.Since(t))
if err != nil {
diff --git a/remoteexec/adapter_test.go b/remoteexec/adapter_test.go
index 731dec6..989d782 100644
--- a/remoteexec/adapter_test.go
+++ b/remoteexec/adapter_test.go
@@ -1049,6 +1049,10 @@
Value: "/b/c/w",
},
{
+ Name: "PWD",
+ Value: "/b/c/w/out/Debug",
+ },
+ {
Name: "WORK_DIR",
Value: "out/Debug",
},
@@ -1066,8 +1070,8 @@
t.Fatalf("err %v", err)
}
- // files and executables might contain extra "out/Release/run.sh".
- wantFiles := []string{"out/Debug/run.sh", "out/Debug/env_file_for_docker", "bin/clang", "include/hello.h", "src/hello.c"}
+ // files and executables might contain extra "out/Debug/run.sh".
+ wantFiles := []string{"out/Debug/run.sh", "bin/clang", "include/hello.h", "src/hello.c"}
wantExecutables := []string{"bin/clang", "out/Debug/run.sh"}
for _, f := range wantFiles {
@@ -1081,12 +1085,9 @@
}
}
- if got, want := files["out/Debug/run.sh"].digest, digest.Bytes("wrapper-script", []byte(wrapperScript)).Digest(); !proto.Equal(got, want) {
+ if got, want := files["out/Debug/run.sh"].digest, digest.Bytes("wrapper-script", []byte(bindMountWrapperScript)).Digest(); !proto.Equal(got, want) {
t.Errorf("digest of out/Debug/run.sh: %s != %s", got, want)
}
- if got, want := files["out/Debug/env_file_for_docker"].digest, digest.Bytes("envfile", []byte("PWD=/b/c/w/out/Debug")).Digest(); !proto.Equal(got, want) {
- t.Errorf("digest of out/Debug/env_file_for_docker: %s != %s", got, want)
- }
}
// TODO: add test for ATS+chroot case using symlinks.
@@ -1107,7 +1108,11 @@
args: []string{"-g"},
want: []*rpb.Platform_Property{
{
- Name: "dockerSiblingContainers",
+ Name: "dockerPrivileged",
+ Value: "true",
+ },
+ {
+ Name: "dockerRunAsRoot",
Value: "true",
},
},
diff --git a/remoteexec/cas/cas.go b/remoteexec/cas/cas.go
index 17508bf..e64cf46 100644
--- a/remoteexec/cas/cas.go
+++ b/remoteexec/cas/cas.go
@@ -8,9 +8,11 @@
"errors"
"fmt"
"sort"
+ "sync"
"time"
rpb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+ "github.com/golang/protobuf/proto"
"go.opencensus.io/trace"
bpb "google.golang.org/genproto/googleapis/bytestream"
"google.golang.org/grpc"
@@ -24,8 +26,10 @@
)
const (
- // DefaultBatchLimit is bytes limit for cas BatchUploadBlobs.
- DefaultBatchLimit = 4 * 1024 * 1024
+ // DefaultBatchByteLimit is bytes limit for cas BatchUploadBlobs.
+ DefaultBatchByteLimit = 4 * 1024 * 1024
+ // batchBlobLimit is max number of blobs in BatchUpdateBlobs.
+ batchBlobLimit = 1000
)
// Client is a client of cas service.
@@ -99,102 +103,201 @@
return fmt.Sprintf("missing %d blobs", len(e.Blobs))
}
-// Upload uploads blobs stored in Store to instance of cas service.
-func (c CAS) Upload(ctx context.Context, instance string, blobs ...*rpb.Digest) error {
- span := trace.FromContext(ctx)
- logger := log.FromContext(ctx)
- logger.Infof("upload blobs %v", blobs)
- // sort by size_bytes.
+func separateBlobsByByteLimit(blobs []*rpb.Digest, instance string, byteLimit int64) ([]*rpb.Digest, []*rpb.Digest) {
+ if len(blobs) == 0 {
+ return nil, nil
+ }
+
sort.Slice(blobs, func(i, j int) bool {
return blobs[i].SizeBytes < blobs[j].SizeBytes
})
- var missing MissingError
- // up to max_batch_total_size_bytes, use BatchUpdateBlobs.
- // more than this, use bytestream.Write.
-
- // TODO: better packing
- var i int
-
- batchLimit := int64(DefaultBatchLimit)
- if c.CacheCapabilities != nil && c.CacheCapabilities.MaxBatchTotalSizeBytes > 0 {
- batchLimit = c.CacheCapabilities.MaxBatchTotalSizeBytes
+ // Create dummy data to check protobuf size. To avoid redundant allocations, find the largest digest size.
+ maxSizeBytes := blobs[len(blobs)-1].SizeBytes
+ dummyReq := &rpb.BatchUpdateBlobsRequest{
+ InstanceName: instance,
+ Requests: []*rpb.BatchUpdateBlobsRequest_Request{{Data: make([]byte, 1, maxSizeBytes)}},
}
-Loop:
- for i < len(blobs) {
- if blobs[i].SizeBytes >= batchLimit {
- break
+
+ for i, blob := range blobs {
+ // Create dummy data to check protobuf size.
+ dummyReq.Requests[0].Digest = blob
+ dummyReq.Requests[0].Data = dummyReq.Requests[0].Data[:blob.SizeBytes]
+ if int64(proto.Size(dummyReq)) >= byteLimit {
+ return blobs[:i], blobs[i:]
}
- batchReq := &rpb.BatchUpdateBlobsRequest{
- InstanceName: instance,
- }
- var size int64
- i0 := i
- for ; i < len(blobs); i++ {
- size += blobs[i].SizeBytes
- if size >= batchLimit {
- break
- }
- data, ok := c.Store.Get(blobs[i])
+ }
+ // All blobs have protobuf size below `byteLimit`.
+ return blobs, []*rpb.Digest{}
+}
+
+func lookupBlobsInStore(ctx context.Context, blobs []*rpb.Digest, store *digest.Store, sema chan struct{}) ([]*rpb.BatchUpdateBlobsRequest_Request, []MissingBlob) {
+ span := trace.FromContext(ctx)
+
+ var wg sync.WaitGroup
+
+ type blobLookupResult struct {
+ err error
+ req *rpb.BatchUpdateBlobsRequest_Request
+ }
+ results := make([]blobLookupResult, len(blobs))
+
+ for i := range blobs {
+ wg.Add(1)
+ go func(blob *rpb.Digest, result *blobLookupResult) {
+ defer wg.Done()
+ sema <- struct{}{}
+ defer func() {
+ <-sema
+ }()
+
+ data, ok := store.Get(blob)
if !ok {
- span.Annotatef(nil, "blob not found in cas: %v", blobs[i])
- missing.Blobs = append(missing.Blobs, MissingBlob{
- Digest: blobs[i],
- Err: errBlobNotInReq,
- })
- continue
+ span.Annotatef(nil, "blob not found in cas: %v", blob)
+ result.err = errBlobNotInReq
+ return
}
b, err := datasource.ReadAll(ctx, data)
if err != nil {
- span.Annotatef(nil, "blob data for %v: %v", blobs[i], err)
- missing.Blobs = append(missing.Blobs, MissingBlob{
- Digest: blobs[i],
- Err: err,
- })
- continue
+ span.Annotatef(nil, "blob data for %v: %v", blob, err)
+ result.err = err
+ return
}
- batchReq.Requests = append(batchReq.Requests, &rpb.BatchUpdateBlobsRequest_Request{
+ // TODO: This is inefficient because we are reading all
+ // sources whether or not they are going to be returned, due to the
+ // size computation happening later. This might be okay as long as
+ // we are not reading too much extra data in one operation.
+ //
+ // We should instead return all blob requests for blobs < `byteLimit`,
+ // batched into multiple BatchUpdateBlobsRequests.
+ result.req = &rpb.BatchUpdateBlobsRequest_Request{
Digest: data.Digest(),
Data: b,
- })
- }
- logger.Infof("upload by batch [%d,%d) out of %d", i0, i, len(blobs))
- t := time.Now()
- span.Annotatef(nil, "batch update %d blobs", len(batchReq.Requests))
- // TODO: should we report rpc error as missing input too?
- var batchResp *rpb.BatchUpdateBlobsResponse
- err := rpc.Retry{}.Do(ctx, func() error {
- var err error
- batchResp, err = c.Client.CAS().BatchUpdateBlobs(ctx, batchReq)
- return fixRBEInternalError(err)
- })
- if err != nil {
- if grpc.Code(err) == codes.ResourceExhausted {
- // gRPC returns ResourceExhausted if request message is larger than max.
- logger.Warnf("upload by batch [%d,%d): %v", i0, i, err)
- // try with bytestream.
- // TODO: retry with fewer blobs?
- i = i0
- break Loop
}
-
- return grpc.Errorf(grpc.Code(err), "batch update blobs: %v", err)
- }
- for _, res := range batchResp.Responses {
- if codes.Code(res.Status.Code) != codes.OK {
- span.Annotatef(nil, "batch update blob %v: %v", res.Digest, res.Status)
- return grpc.Errorf(codes.Code(res.Status.Code), "batch update blob %v: %v", res.Digest, res.Status)
- }
- }
- logger.Infof("upload by batch %d blobs in %s", len(batchReq.Requests), time.Since(t))
+ }(blobs[i], &results[i])
}
- logger.Infof("upload by streaming from %d out of %d", i, len(blobs))
- for ; i < len(blobs); i++ {
- data, ok := c.Store.Get(blobs[i])
+ wg.Wait()
+
+ var reqs []*rpb.BatchUpdateBlobsRequest_Request
+ var missingBlobs []MissingBlob
+
+ logger := log.FromContext(ctx)
+ for i, result := range results {
+ blob := blobs[i]
+ if result.err != nil {
+ missingBlobs = append(missingBlobs, MissingBlob{
+ Digest: blob,
+ Err: result.err,
+ })
+ continue
+ }
+ if result.req != nil {
+ reqs = append(reqs, result.req)
+ continue
+ }
+ logger.Errorf("Lookup of blobs[%d]=%v yielded neither error nor request", i, blob)
+ }
+ return reqs, missingBlobs
+}
+
+func createBatchUpdateBlobsRequests(blobReqs []*rpb.BatchUpdateBlobsRequest_Request, instance string, byteLimit int64) []*rpb.BatchUpdateBlobsRequest {
+ var batchReqs []*rpb.BatchUpdateBlobsRequest
+
+ batchReqNoReqsSize := int64(proto.Size(&rpb.BatchUpdateBlobsRequest{InstanceName: instance}))
+ size := batchReqNoReqsSize
+
+ lastOffset := 0
+ for i := range blobReqs {
+ // This code assumes that all blobs in `blobReqs`, when added as the only element of
+ // `batchReq.Requests`, will keep the marshaled proto size of `batchReq` < `byteLimit`.
+ // If `byteLimit` is 0, then it is ignored.
+
+ // Determine the extra proto size introduced by adding the current req.
+ size += int64(proto.Size(&rpb.BatchUpdateBlobsRequest{Requests: blobReqs[i : i+1]}))
+
+ // Add a new BatchUpdateBlobsRequest with blobs from the first blob after the
+ // previous BatchUpdateBlobsRequest up to and including the current blob, if:
+ // - this is the final blob
+ // - adding this blob reaches the blob count limit
+ // - adding the next blob pushes the size over the byte limit
+ switch {
+ case i == len(blobReqs)-1:
+ fallthrough
+ case i+1 == lastOffset+batchBlobLimit:
+ fallthrough
+ case byteLimit > 0 && size+int64(proto.Size(&rpb.BatchUpdateBlobsRequest{Requests: blobReqs[i+1 : i+2]})) > byteLimit:
+ batchReqs = append(batchReqs, &rpb.BatchUpdateBlobsRequest{
+ InstanceName: instance,
+ Requests: blobReqs[lastOffset : i+1],
+ })
+ size = batchReqNoReqsSize
+ lastOffset = i + 1
+ }
+ }
+ return batchReqs
+}
+
+// Upload uploads blobs stored in Store to instance of cas service.
+func (c CAS) Upload(ctx context.Context, instance string, sema chan struct{}, blobs ...*rpb.Digest) error {
+ span := trace.FromContext(ctx)
+ logger := log.FromContext(ctx)
+ logger.Infof("upload blobs %v", blobs)
+
+ // up to max_batch_total_size_bytes, use BatchUpdateBlobs.
+ // more than this, use bytestream.Write.
+ batchLimit := int64(DefaultBatchByteLimit)
+ if c.CacheCapabilities != nil && c.CacheCapabilities.MaxBatchTotalSizeBytes > 0 {
+ batchLimit = c.CacheCapabilities.MaxBatchTotalSizeBytes
+ }
+ smallBlobs, largeBlobs := separateBlobsByByteLimit(blobs, instance, batchLimit)
+
+ logger.Infof("upload by batch %d out of %d", len(smallBlobs), len(blobs))
+ blobReqs, missingBlobs := lookupBlobsInStore(ctx, smallBlobs, c.Store, sema)
+ missing := MissingError{
+ Blobs: missingBlobs,
+ }
+
+ batchReqs := createBatchUpdateBlobsRequests(blobReqs, instance, batchLimit)
+ for _, batchReq := range batchReqs {
+ uploaded := false
+ for !uploaded {
+ t := time.Now()
+ span.Annotatef(nil, "batch update %d blobs", len(batchReq.Requests))
+ // TODO: should we report rpc error as missing input too?
+ var batchResp *rpb.BatchUpdateBlobsResponse
+ err := rpc.Retry{}.Do(ctx, func() error {
+ var err error
+ batchResp, err = c.Client.CAS().BatchUpdateBlobs(ctx, batchReq)
+ return fixRBEInternalError(err)
+ })
+ if err != nil {
+ if grpc.Code(err) == codes.ResourceExhausted {
+ // gRPC returns ResourceExhausted if request message is larger than max.
+ logger.Warnf("upload by batch %d blobs: %v", len(batchReq.Requests), err)
+ // try with bytestream.
+ // TODO: retry with fewer blobs?
+ continue
+ }
+
+ return grpc.Errorf(grpc.Code(err), "batch update blobs: %v", err)
+ }
+ for _, res := range batchResp.Responses {
+ if codes.Code(res.Status.Code) != codes.OK {
+ span.Annotatef(nil, "batch update blob %v: %v", res.Digest, res.Status)
+ return grpc.Errorf(codes.Code(res.Status.Code), "batch update blob %v: %v", res.Digest, res.Status)
+ }
+ }
+ uploaded = true
+ logger.Infof("upload by batch %d blobs in %s", len(batchReq.Requests), time.Since(t))
+ }
+ }
+ logger.Infof("upload by streaming from %d out of %d", len(largeBlobs), len(blobs))
+ for _, blob := range largeBlobs {
+ data, ok := c.Store.Get(blob)
if !ok {
- span.Annotatef(nil, "blob not found in cas: %v", blobs[i])
+ span.Annotatef(nil, "blob not found in cas: %v", blob)
missing.Blobs = append(missing.Blobs, MissingBlob{
- Digest: blobs[i],
+ Digest: blob,
Err: errBlobNotInReq,
})
continue
@@ -202,14 +305,14 @@
err := rpc.Retry{}.Do(ctx, func() error {
rd, err := data.Open(ctx)
if err != nil {
- span.Annotatef(nil, "upload open %v: %v", blobs[i], err)
+ span.Annotatef(nil, "upload open %v: %v", blob, err)
missing.Blobs = append(missing.Blobs, MissingBlob{
- Digest: blobs[i],
+ Digest: blob,
Err: err,
})
return err
}
- err = UploadDigest(ctx, c.Client.ByteStream(), instance, blobs[i], rd)
+ err = UploadDigest(ctx, c.Client.ByteStream(), instance, blob, rd)
if err != nil {
rd.Close()
return fixRBEInternalError(err)
@@ -218,7 +321,7 @@
return nil
})
if err != nil {
- logger.Errorf("upload streaming %s error: %v", blobs[i], err)
+ logger.Errorf("upload streaming %s error: %v", blob, err)
continue
}
}
diff --git a/remoteexec/cas/cas_test.go b/remoteexec/cas/cas_test.go
new file mode 100644
index 0000000..43dcc05
--- /dev/null
+++ b/remoteexec/cas/cas_test.go
@@ -0,0 +1,799 @@
+// Copyright 2019 The Goma Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cas
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "reflect"
+ "testing"
+
+ rdigest "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
+ rpb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+ "github.com/golang/protobuf/proto"
+ "github.com/google/go-cmp/cmp"
+
+ "go.chromium.org/goma/server/remoteexec/digest"
+)
+
+type blobData struct {
+ digest *rpb.Digest
+ data []byte
+}
+
+func makeBlobData(data string) *blobData {
+ hash := sha256.Sum256([]byte(data))
+ return &blobData{
+ digest: &rpb.Digest{
+ Hash: fmt.Sprintf("%x", hash),
+ SizeBytes: int64(len(data)),
+ },
+ data: []byte(data),
+ }
+}
+
+func getDigests(bds []*blobData) []*rpb.Digest {
+ var result []*rpb.Digest
+ for _, bd := range bds {
+ result = append(result, bd.digest)
+ }
+ return result
+}
+
+func concatDigests(digests ...[]*rpb.Digest) []*rpb.Digest {
+ var result []*rpb.Digest
+ for _, digest := range digests {
+ result = append(result, digest...)
+ }
+ return result
+}
+
+func blobDataToBatchUpdateReq(b *blobData) *rpb.BatchUpdateBlobsRequest_Request {
+ return &rpb.BatchUpdateBlobsRequest_Request{
+ Digest: b.digest,
+ Data: b.data,
+ }
+}
+
+func protoEqual(x, y interface{}) bool {
+ return cmp.Equal(x, y, cmp.Comparer(proto.Equal))
+}
+
+func TestMissing(t *testing.T) {
+ // Blobs already in CAS.
+ presentBlobs := []*blobData{
+ makeBlobData("5WGm1JJ1x77KSrlRgzxL"),
+ makeBlobData("ZJ0BiCaayupcdD2nRTmXXrre772lCF"),
+ makeBlobData("o2JzZO7qr6dwwR2CmXZtWDJ65ZkT885aruPAe0nm"),
+ }
+ // Blobs not in CAS.
+ missingBlobs := []*rpb.Digest{
+ {
+ Hash: "1a77aacc1ed3ea410230d66f1238d5a8",
+ SizeBytes: 50,
+ },
+ {
+ Hash: "bad2614f186bf481ee339896089825b5",
+ SizeBytes: 60,
+ },
+ {
+ Hash: "6f2bf26893e588575985446bf9fd116e",
+ SizeBytes: 70,
+ },
+ }
+
+ allBlobs := append(getDigests(presentBlobs), missingBlobs...)
+
+ for _, tc := range []struct {
+ desc string
+ blobs []*rpb.Digest
+ presentBlobs []*blobData
+ wantMissing []*rpb.Digest
+ }{
+ {
+ desc: "empty CAS",
+ blobs: allBlobs,
+ wantMissing: allBlobs,
+ },
+ {
+ desc: "only present blobs",
+ blobs: allBlobs[:3],
+ presentBlobs: presentBlobs,
+ },
+ {
+ desc: "present and missing blobs",
+ blobs: allBlobs,
+ presentBlobs: presentBlobs,
+ wantMissing: missingBlobs,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ instance := "instance"
+ fc, err := newFakeCASClient(0, instance)
+ defer fc.teardown()
+ if err != nil {
+ t.Errorf("err=%q, want nil", err)
+ return
+ }
+ for _, blob := range tc.presentBlobs {
+ fc.server.cas.Put(blob.data)
+ }
+
+ cas := CAS{Client: fc}
+ ctx := context.Background()
+ missing, err := cas.Missing(ctx, instance, tc.blobs)
+ if err != nil {
+ t.Errorf("err=%q; want nil", err)
+ }
+ if !protoEqual(missing, tc.wantMissing) {
+ t.Errorf("missing=%q; want=%q", missing, tc.wantMissing)
+ }
+ })
+ }
+}
+
+func TestSeparateBlobsByByteLimit(t *testing.T) {
+ blobs := []*rpb.Digest{
+ {
+ Hash: "5baa6de0968b9ef4607ea7c62f847c4b",
+ SizeBytes: 20,
+ },
+ {
+ Hash: "1acc7f1fc0f1c72e10f178f86b7d369b",
+ SizeBytes: 40,
+ },
+ {
+ Hash: "bad2614f186bf481ee339896089825b5",
+ SizeBytes: 60,
+ },
+ {
+ Hash: "87a890520c755d7b5fd322f6e3c487e2",
+ SizeBytes: 80,
+ },
+ {
+ Hash: "51656a4fad2e76ec95dd969d18e87994",
+ SizeBytes: 100,
+ },
+ {
+ Hash: "e0fe265acd2314151b4b5954ec1f748d",
+ SizeBytes: 130,
+ },
+ {
+ Hash: "1a77aacc1ed3ea410230d66f1238d5a8",
+ SizeBytes: 150,
+ },
+ {
+ Hash: "6f2bf26893e588575985446bf9fd116e",
+ SizeBytes: 170,
+ },
+ {
+ Hash: "4381b565d55c06d4021488ecaed98704",
+ SizeBytes: 190,
+ },
+ }
+
+ for _, tc := range []struct {
+ desc string
+ blobs []*rpb.Digest
+ byteLimit int64
+ wantSmall []*rpb.Digest
+ wantLarge []*rpb.Digest
+ }{
+ {
+ desc: "all small blobs",
+ blobs: []*rpb.Digest{
+ blobs[0],
+ blobs[7],
+ blobs[4],
+ blobs[6],
+ blobs[2],
+ blobs[3],
+ blobs[5],
+ blobs[8],
+ blobs[1],
+ },
+ byteLimit: 300,
+ wantSmall: blobs,
+ wantLarge: []*rpb.Digest{},
+ },
+ {
+ desc: "all large blobs",
+ blobs: []*rpb.Digest{
+ blobs[6],
+ blobs[0],
+ blobs[1],
+ blobs[2],
+ blobs[7],
+ blobs[4],
+ blobs[5],
+ blobs[8],
+ blobs[3],
+ },
+ byteLimit: 40,
+ wantSmall: []*rpb.Digest{},
+ wantLarge: blobs,
+ },
+ {
+ desc: "small and large blobs",
+ blobs: []*rpb.Digest{
+ blobs[5],
+ blobs[3],
+ blobs[7],
+ blobs[1],
+ blobs[2],
+ blobs[4],
+ blobs[0],
+ blobs[6],
+ blobs[8],
+ },
+ byteLimit: 150,
+ wantSmall: blobs[:4],
+ wantLarge: blobs[4:],
+ },
+ } {
+ instance := "default"
+ t.Run(tc.desc, func(t *testing.T) {
+ small, large := separateBlobsByByteLimit(tc.blobs, instance, tc.byteLimit)
+ if !protoEqual(small, tc.wantSmall) {
+ t.Errorf("small=%q; want %q", small, tc.wantSmall)
+ }
+ if !protoEqual(large, tc.wantLarge) {
+ t.Errorf("large=%q; want %q", large, tc.wantLarge)
+ }
+ })
+ }
+}
+
+func TestUpload(t *testing.T) {
+ // Blobs that are present in both local Store and file_server.
+ presentBlobs := []*blobData{
+ makeBlobData("5WGm1JJ1x77KSrlRgzxL"),
+ makeBlobData("ZJ0BiCaayupcdD2nRTmXXrre772lCF"),
+ makeBlobData("o2JzZO7qr6dwwR2CmXZtWDJ65ZkT885aruPAe0nm"),
+ }
+
+ // Blobs present on local Store but missing from file_server.
+ missingFileBlobs := []*rpb.Digest{
+ {
+ Hash: "1a77aacc1ed3ea410230d66f1238d5a8",
+ SizeBytes: 50,
+ }, {
+ Hash: "bad2614f186bf481ee339896089825b5",
+ SizeBytes: 60,
+ }, {
+ Hash: "6f2bf26893e588575985446bf9fd116e",
+ SizeBytes: 70,
+ },
+ }
+
+ // Blobs missing from local Store.
+ missingStoreBlobs := []*rpb.Digest{
+ {
+ Hash: "87a890520c755d7b5fd322f6e3c487e2",
+ SizeBytes: 80,
+ },
+ {
+ Hash: "4381b565d55c06d4021488ecaed98704",
+ SizeBytes: 90,
+ },
+ {
+ Hash: "51656a4fad2e76ec95dd969d18e87994",
+ SizeBytes: 100,
+ },
+ }
+
+ store := digest.NewStore()
+ for _, blob := range presentBlobs {
+ store.Set(makeFakeDigestData(blob.digest, blob.data))
+ }
+ for _, blob := range missingFileBlobs {
+ store.Set(makeFakeDigestData(blob, nil))
+ }
+
+ toString := func(d *rpb.Digest) string {
+ return fmt.Sprintf("%s/%d", d.Hash, d.SizeBytes)
+ }
+
+ for _, tc := range []struct {
+ desc string
+ blobs []*rpb.Digest
+ byteLimit int64
+ wantStored map[string][]byte
+ wantMissing []MissingBlob
+ wantNumBatchUpdates int
+ wantNumByteStreamWrites int
+ }{
+ {
+ desc: "present blobs",
+ blobs: getDigests(presentBlobs),
+ wantStored: map[string][]byte{
+ toString(presentBlobs[0].digest): presentBlobs[0].data,
+ toString(presentBlobs[1].digest): presentBlobs[1].data,
+ toString(presentBlobs[2].digest): presentBlobs[2].data,
+ },
+ wantNumBatchUpdates: 1,
+ },
+ {
+ desc: "missing blobs",
+ blobs: concatDigests(missingStoreBlobs, missingFileBlobs),
+ wantMissing: []MissingBlob{
+ {
+ Digest: missingFileBlobs[0],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[1],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[2],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[0],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[1],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[2],
+ Err: errBlobNotInReq,
+ },
+ },
+ wantStored: map[string][]byte{},
+ },
+ {
+ desc: "present and missing blobs",
+ blobs: concatDigests(missingFileBlobs, missingStoreBlobs, getDigests(presentBlobs)),
+ wantStored: map[string][]byte{
+ toString(presentBlobs[0].digest): presentBlobs[0].data,
+ toString(presentBlobs[1].digest): presentBlobs[1].data,
+ toString(presentBlobs[2].digest): presentBlobs[2].data,
+ },
+ wantMissing: []MissingBlob{
+ {
+ Digest: missingFileBlobs[0],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[1],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[2],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[0],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[1],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[2],
+ Err: errBlobNotInReq,
+ },
+ },
+ wantNumBatchUpdates: 1,
+ },
+ {
+ desc: "present and missing blobs with limit > max blob size",
+ blobs: concatDigests(missingStoreBlobs, getDigests(presentBlobs), missingFileBlobs),
+ byteLimit: 500,
+ wantStored: map[string][]byte{
+ toString(presentBlobs[0].digest): presentBlobs[0].data,
+ toString(presentBlobs[1].digest): presentBlobs[1].data,
+ toString(presentBlobs[2].digest): presentBlobs[2].data,
+ },
+ wantMissing: []MissingBlob{
+ {
+ Digest: missingFileBlobs[0],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[1],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[2],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[0],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[1],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[2],
+ Err: errBlobNotInReq,
+ },
+ },
+ wantNumBatchUpdates: 1,
+ },
+ {
+ desc: "present and missing blobs with limit < max blob size",
+ blobs: concatDigests(missingStoreBlobs, missingFileBlobs, getDigests(presentBlobs)),
+ byteLimit: 110,
+ wantStored: map[string][]byte{
+ toString(presentBlobs[0].digest): presentBlobs[0].data,
+ toString(presentBlobs[1].digest): presentBlobs[1].data,
+ toString(presentBlobs[2].digest): presentBlobs[2].data,
+ },
+ wantMissing: []MissingBlob{
+ {
+ Digest: missingFileBlobs[0],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[1],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[2],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[0],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[1],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[2],
+ Err: errBlobNotInReq,
+ },
+ },
+ wantNumBatchUpdates: 1,
+ wantNumByteStreamWrites: 2,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ instance := "instance"
+ fc, err := newFakeCASClient(tc.byteLimit, instance)
+ defer fc.teardown()
+ if err != nil {
+ t.Errorf("err=%q, want nil", err)
+ return
+ }
+
+ cas := CAS{
+ Client: fc,
+ Store: store,
+ CacheCapabilities: &rpb.CacheCapabilities{MaxBatchTotalSizeBytes: tc.byteLimit},
+ }
+ ctx := context.Background()
+ sema := make(chan struct{}, 100)
+ err = cas.Upload(ctx, instance, sema, tc.blobs...)
+
+ if tc.wantMissing != nil {
+ if missing, ok := err.(MissingError); ok {
+ if !reflect.DeepEqual(missing.Blobs, tc.wantMissing) {
+ t.Errorf("missing.Blobs=%q; want=%q", missing.Blobs, tc.wantMissing)
+ }
+ } else {
+ t.Errorf("Unexpected error: %q", err)
+ }
+ } else if err != nil {
+ t.Errorf("Unexpected error: %q", err)
+ }
+
+ casSrv := fc.server.cas
+ if casSrv.BatchReqs() != tc.wantNumBatchUpdates {
+ t.Errorf("casSrv.BatchReqs()=%d, want=%d", casSrv.BatchReqs(), tc.wantNumBatchUpdates)
+ }
+ if casSrv.WriteReqs() != tc.wantNumByteStreamWrites {
+ t.Errorf("casSrv.WriteReqs()=%d, want=%d", casSrv.WriteReqs(), tc.wantNumByteStreamWrites)
+ }
+
+ stored := map[string][]byte{}
+ for _, blob := range tc.blobs {
+ data, ok := casSrv.Get(rdigest.Digest{
+ Hash: blob.Hash,
+ Size: blob.SizeBytes,
+ })
+ if ok {
+ stored[toString(blob)] = data
+ }
+ }
+ if !reflect.DeepEqual(stored, tc.wantStored) {
+ t.Errorf("stored=%q; want=%q", stored, tc.wantStored)
+ }
+ })
+ }
+}
+
+func toBatchReqs(bds []*blobData) []*rpb.BatchUpdateBlobsRequest_Request {
+ var result []*rpb.BatchUpdateBlobsRequest_Request
+ for _, bd := range bds {
+ result = append(result, &rpb.BatchUpdateBlobsRequest_Request{
+ Digest: bd.digest,
+ Data: bd.data,
+ })
+ }
+ return result
+}
+
+func TestLookupBlobsInStore(t *testing.T) {
+ // Blobs that are present in both local Store and file_server.
+ presentBlobs := []*blobData{
+ makeBlobData("5WGm1JJ1x77KSrlRgzxL"),
+ makeBlobData("ZJ0BiCaayupcdD2nRTmXXrre772lCF"),
+ makeBlobData("o2JzZO7qr6dwwR2CmXZtWDJ65ZkT885aruPAe0nm"),
+ }
+ // Blobs present on local Store but missing from file_server.
+ missingFileBlobs := []*rpb.Digest{
+ {
+ Hash: "1a77aacc1ed3ea410230d66f1238d5a8",
+ SizeBytes: 50,
+ },
+ {
+ Hash: "bad2614f186bf481ee339896089825b5",
+ SizeBytes: 60,
+ },
+ {
+ Hash: "6f2bf26893e588575985446bf9fd116e",
+ SizeBytes: 70,
+ },
+ }
+ // Blobs missing from local Store.
+ missingStoreBlobs := []*rpb.Digest{
+ {
+ Hash: "87a890520c755d7b5fd322f6e3c487e2",
+ SizeBytes: 80,
+ },
+ {
+ Hash: "4381b565d55c06d4021488ecaed98704",
+ SizeBytes: 90,
+ },
+ {
+ Hash: "51656a4fad2e76ec95dd969d18e87994",
+ SizeBytes: 100,
+ },
+ }
+ store := digest.NewStore()
+ for _, blob := range presentBlobs {
+ store.Set(makeFakeDigestData(blob.digest, blob.data))
+ }
+ for _, blob := range missingFileBlobs {
+ store.Set(makeFakeDigestData(blob, nil))
+ }
+ for _, tc := range []struct {
+ desc string
+ blobs []*rpb.Digest
+ wantReqs []*rpb.BatchUpdateBlobsRequest_Request
+ wantMissing []MissingBlob
+ }{
+ {
+ desc: "present blobs",
+ blobs: getDigests(presentBlobs),
+ wantReqs: toBatchReqs(presentBlobs),
+ },
+ {
+ desc: "missing blobs",
+ blobs: append(missingFileBlobs, missingStoreBlobs...),
+ wantMissing: []MissingBlob{
+ {
+ Digest: missingFileBlobs[0],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[1],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingFileBlobs[2],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[0],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[1],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingStoreBlobs[2],
+ Err: errBlobNotInReq,
+ },
+ },
+ },
+ {
+ desc: "present and missing blobs",
+ blobs: []*rpb.Digest{
+ presentBlobs[0].digest,
+ missingFileBlobs[0],
+ missingStoreBlobs[0],
+ presentBlobs[1].digest,
+ missingFileBlobs[1],
+ missingStoreBlobs[1],
+ presentBlobs[2].digest,
+ missingFileBlobs[2],
+ missingStoreBlobs[2],
+ },
+ wantReqs: toBatchReqs(presentBlobs),
+ wantMissing: []MissingBlob{
+ {
+ Digest: missingFileBlobs[0],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[0],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingFileBlobs[1],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[1],
+ Err: errBlobNotInReq,
+ },
+ {
+ Digest: missingFileBlobs[2],
+ Err: errFakeSourceNotFound,
+ },
+ {
+ Digest: missingStoreBlobs[2],
+ Err: errBlobNotInReq,
+ },
+ },
+ },
+ } {
+ // Do test here
+ t.Run(tc.desc, func(t *testing.T) {
+ ctx := context.Background()
+ sema := make(chan struct{}, 100)
+ reqs, missing := lookupBlobsInStore(ctx, tc.blobs, store, sema)
+ if !protoEqual(reqs, tc.wantReqs) {
+ t.Errorf("reqs=%q; want %q", reqs, tc.wantReqs)
+ }
+ if !reflect.DeepEqual(missing, tc.wantMissing) {
+ t.Errorf("missing=%q; want %q", missing, tc.wantMissing)
+ }
+ })
+ }
+}
+
+func TestCreateBatchUpdateBlobsRequests(t *testing.T) {
+ blobs := []*blobData{
+ makeBlobData("2aMmqx86iH"),
+ makeBlobData("5WGm1JJ1x77KSrlRgzxL"),
+ makeBlobData("ZJ0BiCaayupcdD2nRTmXXrre772lCF"),
+ makeBlobData("o2JzZO7qr6dwwR2CmXZtWDJ65ZkT885aruPAe0nm"),
+ makeBlobData("q7cBg9I69ZiXwe1U883vSwLIXRZ2eGNUMD2gIeqSqWfLK9IYZh"),
+ makeBlobData("iyBzGRoMAqpTEaseblU5wl9S2aub0tzhOpQYlwhDcRCQh32XSTOIueVN29mC"),
+ }
+ var blobReqs []*rpb.BatchUpdateBlobsRequest_Request
+ for _, blob := range blobs {
+ blobReqs = append(blobReqs, blobDataToBatchUpdateReq(blob))
+ }
+
+ // Large group of blobs for testing `batchBlobLimit`
+	bigBlobReqs := make([]*rpb.BatchUpdateBlobsRequest_Request, batchBlobLimit+2)
+ for i := range bigBlobReqs {
+ bigBlobReqs[i] = blobDataToBatchUpdateReq(blobs[i%len(blobs)])
+ }
+
+ instance := "instance"
+ for _, tc := range []struct {
+ desc string
+ reqs []*rpb.BatchUpdateBlobsRequest_Request
+ byteLimit int64
+ want []*rpb.BatchUpdateBlobsRequest
+ }{
+ {
+ desc: "empty input",
+ },
+ {
+ desc: "all blobs in one request",
+ reqs: blobReqs,
+ want: []*rpb.BatchUpdateBlobsRequest{
+ {
+ InstanceName: instance,
+ Requests: blobReqs,
+ },
+ },
+ },
+ {
+ // Each blob, when added to a BatchUpdateBlobsRequest as the only element,
+ // makes the BatchUpdateBlobsRequest proto size <= 150 bytes
+ desc: "limit 150 bytes",
+ reqs: blobReqs,
+ byteLimit: 150,
+ want: []*rpb.BatchUpdateBlobsRequest{
+ {
+ InstanceName: instance,
+ Requests: blobReqs[0:1],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[1:2],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[2:3],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[3:4],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[4:5],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[5:6],
+ },
+ },
+ },
+ {
+ desc: "limit 300 bytes",
+ reqs: blobReqs,
+ byteLimit: 300,
+ want: []*rpb.BatchUpdateBlobsRequest{
+ {
+ InstanceName: instance,
+ Requests: blobReqs[0:3],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[3:5],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[5:6],
+ },
+ },
+ },
+ {
+ desc: "limit 500 bytes",
+ reqs: blobReqs,
+ byteLimit: 500,
+ want: []*rpb.BatchUpdateBlobsRequest{
+ {
+ InstanceName: instance,
+ Requests: blobReqs[0:4],
+ },
+ {
+ InstanceName: instance,
+ Requests: blobReqs[4:6],
+ },
+ },
+ },
+ {
+ desc: "blob count limit",
+ reqs: bigBlobReqs,
+ want: []*rpb.BatchUpdateBlobsRequest{
+ {
+ InstanceName: instance,
+ Requests: bigBlobReqs[0:batchBlobLimit],
+ },
+ {
+ InstanceName: instance,
+ Requests: bigBlobReqs[batchBlobLimit:],
+ },
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ batchReqs := createBatchUpdateBlobsRequests(tc.reqs, instance, tc.byteLimit)
+ if !cmp.Equal(batchReqs, tc.want) {
+ t.Errorf("batchReqs=%q; want %q", batchReqs, tc.want)
+ }
+ })
+ }
+}
diff --git a/remoteexec/cas/fake_cas_for_test.go b/remoteexec/cas/fake_cas_for_test.go
new file mode 100644
index 0000000..2663152
--- /dev/null
+++ b/remoteexec/cas/fake_cas_for_test.go
@@ -0,0 +1,92 @@
+// Copyright 2020 The Goma Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cas
+
+import (
+ "fmt"
+
+ "github.com/bazelbuild/remote-apis-sdks/go/pkg/fakes"
+ rpb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+ bpb "google.golang.org/genproto/googleapis/bytestream"
+ "google.golang.org/grpc"
+
+ "go.chromium.org/goma/server/rpc/grpctest"
+)
+
+var (
+	errNotImplemented = fmt.Errorf("function not implemented")
+)
+
+type fakeCASServer struct {
+ srv *grpc.Server
+ cas *fakes.CAS
+
+ addr string
+ stop func()
+}
+
+func newFakeCASServer() (*fakeCASServer, error) {
+ f := &fakeCASServer{
+ srv: grpc.NewServer(),
+ cas: fakes.NewCAS(),
+ }
+ bpb.RegisterByteStreamServer(f.srv, f.cas)
+ rpb.RegisterContentAddressableStorageServer(f.srv, f.cas)
+ var err error
+ f.addr, f.stop, err = grpctest.StartServer(f.srv)
+ if err != nil {
+ f.stop()
+ return nil, err
+ }
+ return f, nil
+}
+
+type fakeCASClient struct {
+ Client
+ casClient rpb.ContentAddressableStorageClient
+ bsClient bpb.ByteStreamClient
+
+ batchUpdateByteLimit int64
+ server *fakeCASServer
+ conn *grpc.ClientConn
+}
+
+func (f *fakeCASClient) CAS() rpb.ContentAddressableStorageClient {
+ return f.casClient
+}
+
+func (f *fakeCASClient) ByteStream() bpb.ByteStreamClient {
+ return f.bsClient
+}
+
+func (f *fakeCASClient) teardown() {
+ f.conn.Close()
+ f.server.stop()
+}
+
+func newFakeCASClient(byteLimit int64, instances ...string) (*fakeCASClient, error) {
+ if byteLimit == 0 {
+ byteLimit = DefaultBatchByteLimit
+ }
+
+ f := &fakeCASClient{
+ batchUpdateByteLimit: byteLimit,
+ }
+ var err error
+
+ f.server, err = newFakeCASServer()
+ if err != nil {
+ return nil, err
+ }
+
+ f.conn, err = grpc.Dial(f.server.addr, grpc.WithInsecure())
+ if err != nil {
+ return nil, err
+ }
+ f.bsClient = bpb.NewByteStreamClient(f.conn)
+ f.casClient = rpb.NewContentAddressableStorageClient(f.conn)
+
+ return f, nil
+}
diff --git a/remoteexec/cas/fake_digest_for_test.go b/remoteexec/cas/fake_digest_for_test.go
new file mode 100644
index 0000000..cd6f6d5
--- /dev/null
+++ b/remoteexec/cas/fake_digest_for_test.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The Goma Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cas
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ rpb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
+
+ "go.chromium.org/goma/server/remoteexec/digest"
+)
+
+var (
+	errFakeSourceNotFound = errors.New("source not found")
+)
+
+type testSource struct {
+ r io.ReadCloser
+}
+
+func (s testSource) Open(ctx context.Context) (io.ReadCloser, error) {
+ if s.r == nil {
+ return nil, errFakeSourceNotFound
+ }
+ return s.r, nil
+}
+
+func (s testSource) String() string {
+ return ""
+}
+
+type testReadCloser struct {
+ data []byte
+}
+
+func (rc *testReadCloser) Read(p []byte) (n int, err error) {
+ n = copy(p, rc.data)
+ return n, io.EOF
+}
+
+func (rc *testReadCloser) Close() error {
+ return nil
+}
+
+type fakeDigestData struct {
+ digest *rpb.Digest
+ digest.Source
+}
+
+func (d *fakeDigestData) Digest() *rpb.Digest {
+ return d.digest
+}
+
+func makeFakeDigestData(digest *rpb.Digest, data []byte) *fakeDigestData {
+ var source testSource
+ if data != nil {
+ source.r = &testReadCloser{
+ data: data,
+ }
+ }
+ return &fakeDigestData{
+ digest: digest,
+ Source: source,
+ }
+}
diff --git a/remoteexec/client.go b/remoteexec/client.go
index 7a2b1b3..b826c79 100644
--- a/remoteexec/client.go
+++ b/remoteexec/client.go
@@ -12,8 +12,6 @@
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
bpb "google.golang.org/genproto/googleapis/bytestream"
lpb "google.golang.org/genproto/googleapis/longrunning"
spb "google.golang.org/genproto/googleapis/rpc/status"
@@ -25,21 +23,6 @@
"go.chromium.org/goma/server/rpc"
)
-var (
- numRunningOperations = stats.Int64(
- "go.chromium.org/goma/server/remoteexec.running-operations",
- "Number of current running exec operations",
- stats.UnitDimensionless)
-
- DefaultViews = []*view.View{
- {
- Description: `Number of current running exec operations`,
- Measure: numRunningOperations,
- Aggregation: view.Sum(),
- },
- }
-)
-
// Client is a remoteexec API client to ClientConn.
// CallOptions will be added when calling RPC.
//
@@ -169,13 +152,6 @@
logger := log.FromContext(ctx)
logger.Infof("execute action")
- recordStart := func() {
- stats.Record(ctx, numRunningOperations.M(1))
- }
- recordFinish := func() {
- stats.Record(ctx, numRunningOperations.M(-1))
- }
-
var opName string
var waitReq *rpb.WaitExecutionRequest
resp := &rpb.ExecuteResponse{}
@@ -191,7 +167,7 @@
if waitReq != nil {
stream, err = c.Exec().WaitExecution(ctx, waitReq, opts...)
} else {
- recordStart()
+ recordRemoteExecStart(ctx)
stream, err = c.Exec().Execute(ctx, req, opts...)
}
if err != nil {
@@ -204,7 +180,7 @@
// otherwise, rerun from WaitExecution.
if status.Code(err) == codes.NotFound {
waitReq = nil
- recordFinish()
+ recordRemoteExecFinish(ctx)
return status.Errorf(codes.Unavailable, "operation stream lost: %v", err)
}
return err
@@ -230,7 +206,7 @@
return erespErr(pctx, resp)
}
})
- recordFinish()
+ recordRemoteExecFinish(ctx)
if err == nil {
err = status.FromProto(resp.GetStatus()).Err()
}
diff --git a/remoteexec/exec.go b/remoteexec/exec.go
index 3ca270a..034a35d 100644
--- a/remoteexec/exec.go
+++ b/remoteexec/exec.go
@@ -20,7 +20,10 @@
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
tspb "github.com/golang/protobuf/ptypes/timestamp"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/tag"
"go.opencensus.io/trace"
+ "golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -236,6 +239,107 @@
return e, nil
}
+type gomaInputInterface interface {
+ toDigest(context.Context, *gomapb.ExecReq_Input) (digest.Data, error)
+ upload(context.Context, []*gomapb.FileBlob) ([]string, error)
+}
+
+func uploadInputFiles(ctx context.Context, inputs []*gomapb.ExecReq_Input, gi gomaInputInterface, sema chan struct{}) error {
+ count := 0
+ size := 0
+ batchLimit := 500
+ sizeLimit := 10 * 1024 * 1024
+
+ beginOffset := 0
+ hashKeys := make([]string, len(inputs))
+
+ eg, ctx := errgroup.WithContext(ctx)
+
+ for i, input := range inputs {
+ count++
+ size += len(input.Content.Content)
+
+ // Upload a bunch of file blobs if one of the following:
+ // - inputs[uploadBegin:i] reached the upload blob count limit
+ // - inputs[uploadBegin:i] exceeds the upload blob size limit
+ // - we are on the last blob to be uploaded
+ if count < batchLimit && size < sizeLimit && i < len(inputs)-1 {
+ continue
+ }
+
+ inputs := inputs[beginOffset : i+1]
+ results := hashKeys[beginOffset : i+1]
+ eg.Go(func() error {
+ sema <- struct{}{}
+ defer func() {
+ <-sema
+ }()
+
+ contents := make([]*gomapb.FileBlob, len(inputs))
+ for i, input := range inputs {
+ contents[i] = input.Content
+ }
+
+ var hks []string
+ var err error
+ err = rpc.Retry{}.Do(ctx, func() error {
+ hks, err = gi.upload(ctx, contents)
+ return err
+ })
+
+ if err != nil {
+ return fmt.Errorf("setup %s input error: %v", inputs[0].GetFilename(), err)
+ }
+ if len(hks) != len(contents) {
+ return fmt.Errorf("invalid number of hash keys: %d, want %d", len(hks), len(contents))
+ }
+ for i, hk := range hks {
+ input := inputs[i]
+ if input.GetHashKey() != hk {
+				return fmt.Errorf("hashkey mismatch: embedded input %s %s != %s", input.GetFilename(), input.GetHashKey(), hk)
+ }
+ results[i] = hk
+ }
+ return nil
+ })
+ beginOffset = i + 1
+ count = 0
+ size = 0
+ }
+
+ defer func() {
+ maxOutputSize := len(inputs)
+ if maxOutputSize > 10 {
+ maxOutputSize = 10
+ }
+ successfulUploadsMsg := make([]string, 0, maxOutputSize+1)
+ for i, input := range inputs {
+ if len(hashKeys[i]) == 0 {
+ continue
+ }
+ if i == maxOutputSize && i < len(inputs)-1 {
+ successfulUploadsMsg = append(successfulUploadsMsg, "...")
+ break
+ }
+ successfulUploadsMsg = append(successfulUploadsMsg, fmt.Sprintf("%s -> %s", input.GetFilename(), hashKeys[i]))
+ }
+ logger := log.FromContext(ctx)
+ logger.Infof("embedded inputs: %v", successfulUploadsMsg)
+
+ numSuccessfulUploads := 0
+ for _, hk := range hashKeys {
+ if len(hk) > 0 {
+ numSuccessfulUploads++
+ }
+ }
+ if numSuccessfulUploads < len(inputs) {
+ logger.Errorf("%d file blobs successfully uploaded, out of %d", numSuccessfulUploads, len(inputs))
+ }
+ }()
+
+ return eg.Wait()
+}
+
type inputFileResult struct {
missingInput string
missingReason string
@@ -244,11 +348,6 @@
err error
}
-type gomaInputInterface interface {
- toDigest(context.Context, *gomapb.ExecReq_Input) (digest.Data, error)
- upload(context.Context, *gomapb.FileBlob) (string, error)
-}
-
func inputFiles(ctx context.Context, inputs []*gomapb.ExecReq_Input, gi gomaInputInterface, rootRel func(string) (string, error), executableInputs map[string]bool, sema chan struct{}) []inputFileResult {
logger := log.FromContext(ctx)
var wg sync.WaitGroup
@@ -258,7 +357,7 @@
results := make([]inputFileResult, len(inputs))
for i, input := range inputs {
wg.Add(1)
- go func(index int, input *gomapb.ExecReq_Input) {
+ go func(input *gomapb.ExecReq_Input, result *inputFileResult) {
defer wg.Done()
sema <- struct{}{}
defer func() {
@@ -271,18 +370,14 @@
logger.Warnf("filename %s: %v", input.GetFilename(), err)
return
}
- results[index] = inputFileResult{
- err: fmt.Errorf("input file: %s %v", input.GetFilename(), err),
- }
+ result.err = fmt.Errorf("input file: %s %v", input.GetFilename(), err)
return
}
data, err := gi.toDigest(ctx, input)
if err != nil {
- results[index] = inputFileResult{
- missingInput: input.GetFilename(),
- missingReason: fmt.Sprintf("input: %v", err),
- }
+ result.missingInput = input.GetFilename()
+ result.missingReason = fmt.Sprintf("input: %v", err)
return
}
file := merkletree.Entry{
@@ -293,33 +388,15 @@
},
IsExecutable: executableInputs[input.GetFilename()],
}
+ result.file = file
if input.Content == nil {
- results[index] = inputFileResult{
- file: file,
- }
return
}
- results[index] = inputFileResult{
- file: file,
- uploaded: true,
- }
- var hk string
- err = rpc.Retry{}.Do(ctx, func() error {
- hk, err = gi.upload(ctx, input.Content)
- return err
- })
- if err != nil {
- logger.Errorf("setup %d %s input error: %v", index, input.GetFilename(), err)
- return
- }
- if input.GetHashKey() != hk {
- logger.Errorf("hashkey missmatch: embedded input %s %s != %s", input.GetFilename(), input.GetHashKey(), hk)
- return
- }
- logger.Infof("embedded input %s %s", input.GetFilename(), hk)
- }(i, input)
+ result.uploaded = true
+ }(input, &results[i])
}
wg.Wait()
+
return results
}
@@ -382,18 +459,13 @@
cleanCWD := r.filepath.Clean(r.gomaReq.GetCwd())
cleanRootDir := r.filepath.Clean(r.tree.RootDir())
- concurrent := r.f.FileLookupConcurrency
- if concurrent == 0 {
- concurrent = 1
- }
- sema := make(chan struct{}, concurrent)
-
- results := inputFiles(ctx, r.gomaReq.Input, gomaInput{
+ gi := gomaInput{
gomaFile: r.f.GomaFile,
digestCache: r.f.DigestCache,
- }, func(filename string) (string, error) {
+ }
+ results := inputFiles(ctx, r.gomaReq.Input, gi, func(filename string) (string, error) {
return rootRel(r.filepath, filename, cleanCWD, cleanRootDir)
- }, executableInputs, sema)
+ }, executableInputs, r.f.FileLookupSema)
for _, result := range results {
if result.err != nil {
r.err = result.err
@@ -401,6 +473,20 @@
}
}
+ uploads := make([]*gomapb.ExecReq_Input, 0, len(r.gomaReq.Input))
+ for i, input := range r.gomaReq.Input {
+ result := &results[i]
+ if result.uploaded {
+ uploads = append(uploads, input)
+ }
+ }
+
+ err = uploadInputFiles(ctx, uploads, gi, r.f.FileLookupSema)
+ if err != nil {
+ r.err = err
+ return nil
+ }
+
var uploaded int
var files []merkletree.Entry
var missingInputs []string
@@ -523,24 +609,38 @@
return nil
}
+type wrapperType int
+
const (
- wrapperScript = `#!/bin/bash
+ wrapperCwdAgnostic wrapperType = iota
+ wrapperBindMount
+ wrapperNsjailChroot
+ wrapperWin
+)
+
+func (w wrapperType) String() string {
+ switch w {
+ case wrapperCwdAgnostic:
+ return "wrapper-cwd-agnostic"
+ case wrapperBindMount:
+ return "wrapper-bind-mount"
+ case wrapperNsjailChroot:
+ return "wrapper-nsjail-chroot"
+ case wrapperWin:
+ return "wrapper-win"
+ default:
+ return fmt.Sprintf("wrapper-unknown-%d", int(w))
+ }
+}
+
+const (
+ bindMountWrapperScript = `#!/bin/bash
# run command (i.e. "$@") at the same dir as user.
# INPUT_ROOT_DIR: expected directory of input root.
# input root is current directory to run this script.
# need to mount input root on $INPUT_ROOT_DIR.
# WORK_DIR: working directory relative to INPUT_ROOT_DIR.
# command will run at $INPUT_ROOT_DIR/$WORK_DIR.
-#
-# by default, it runs in sibling docker (for Cloud RBE).
-# with the same uid
-# with current directory (input root) mounted as same path as user's input root
-# with the same working directory as user
-# with the same container image
-# to run the command line as user requested.
-#
-# if /opt/goma/bin/run-at-dir.sh exists in contianer image, use it instead.
-# http://b/132742952
set -e
# check inputs.
@@ -560,37 +660,12 @@
exit 1
fi
-## get container id
-containerid="$(basename "$(cat /proc/self/cpuset)")"
-
-## get image url from current container.
-image="$(docker inspect --format '{{.Config.Image}}' "$containerid")"
-
-
-## get volume source dir for this directory.
-set +e # docker inspect might fails, depending on client version.
-rundir="$(docker inspect --format '{{range .Mounts}}{{if eq .Destination "'"$(pwd)"'"}}{{.Source}}{{end -}}{{end}}' "$containerid" 2>/dev/null)"
-if [[ "$rundir" = "" ]]; then
- # for legacy docker client
- rundir="$(docker inspect --format '{{index .Volumes "'"$(pwd)"'"}}' "$containerid")"
-fi
-set -e
-if [[ "$rundir" = "" ]]; then
- echo "error: failed to detect volume source dir" >&2
- docker version
- exit 1
-fi
-
-# TODO: use PWD instead of INPUT_ROOT_DIR if appricable.
-
-docker run \
- -u "$(id -u)" \
- --volume "${rundir}:${INPUT_ROOT_DIR}" \
- --workdir "${INPUT_ROOT_DIR}/${WORK_DIR}" \
- --env-file "${WORK_DIR}/env_file_for_docker" \
- --rm \
- "$image" \
- "$@"
+mkdir -p "${INPUT_ROOT_DIR}" || true # require root
+mount --bind "$(pwd)" "${INPUT_ROOT_DIR}" # require privileged
+cd "${INPUT_ROOT_DIR}/${WORK_DIR}"
+# TODO: run as normal user, not as root.
+# run as nobody will fail with "unable to open output file. permission denied"
+"$@"
`
cwdAgnosticWrapperScript = `#!/bin/bash
@@ -633,67 +708,86 @@
args := buildArgs(ctx, cmdConfig, argv0, r.gomaReq)
// TODO: only allow whitelisted envs.
+ wt := wrapperCwdAgnostic
pathType := cmdConfig.GetCmdDescriptor().GetSetup().GetPathType()
- const posixWrapperName = "run.sh"
switch pathType {
case cmdpb.CmdDescriptor_POSIX:
if r.needChroot {
- logger.Infof("run with chroot")
- // needed for bind mount.
- r.addPlatformProperty(ctx, "dockerPrivileged", "true")
- // needed for chroot command and mount command.
- r.addPlatformProperty(ctx, "dockerRunAsRoot", "true")
- nsjailCfg := nsjailConfig(cwd, r.filepath, r.gomaReq.GetToolchainSpecs(), r.gomaReq.Env)
- files = []fileDesc{
- {
- name: posixWrapperName,
- data: digest.Bytes("nsjail-run-wrapper-script", []byte(nsjailRunWrapperScript)),
- isExecutable: true,
- },
- {
- name: "nsjail.cfg",
- data: digest.Bytes("nsjail-config-file", []byte(nsjailCfg)),
- },
- }
+ wt = wrapperNsjailChroot
} else {
err = cwdAgnosticReq(ctx, cmdConfig, r.filepath, r.gomaReq.Arg, r.gomaReq.Env)
if err != nil {
+ wt = wrapperBindMount
logger.Infof("non cwd agnostic: %v", err)
- envs = append(envs, fmt.Sprintf("INPUT_ROOT_DIR=%s", r.tree.RootDir()))
-
- r.addPlatformProperty(ctx, "dockerSiblingContainers", "true")
- files = []fileDesc{
- {
- name: posixWrapperName,
- data: digest.Bytes("wrapper-script", []byte(wrapperScript)),
- isExecutable: true,
- },
- {
- name: "env_file_for_docker",
- data: digest.Bytes("envfile", []byte(strings.Join(r.gomaReq.Env, "\n"))),
- },
- }
- } else {
- logger.Infof("cwd agnostic")
- for _, e := range r.gomaReq.Env {
- if strings.HasPrefix(e, "PWD=") {
- // PWD is usually absolute path.
- // if cwd agnostic, then we should remove
- // PWD environment variable.
- continue
- }
- envs = append(envs, e)
- }
- files = []fileDesc{
- {
- name: posixWrapperName,
- data: digest.Bytes("cwd-agnostic-wrapper-script", []byte(cwdAgnosticWrapperScript)),
- isExecutable: true,
- },
- }
}
}
case cmdpb.CmdDescriptor_WINDOWS:
+ wt = wrapperWin
+ default:
+ return fmt.Errorf("bad path type: %v", pathType)
+ }
+
+ const posixWrapperName = "run.sh"
+ switch wt {
+ case wrapperNsjailChroot:
+ logger.Infof("run with nsjail chroot")
+ // needed for bind mount.
+ r.addPlatformProperty(ctx, "dockerPrivileged", "true")
+ // needed for chroot command and mount command.
+ r.addPlatformProperty(ctx, "dockerRunAsRoot", "true")
+ nsjailCfg := nsjailConfig(cwd, r.filepath, r.gomaReq.GetToolchainSpecs(), r.gomaReq.Env)
+ files = []fileDesc{
+ {
+ name: posixWrapperName,
+ data: digest.Bytes("nsjail-run-wrapper-script", []byte(nsjailRunWrapperScript)),
+ isExecutable: true,
+ },
+ {
+ name: "nsjail.cfg",
+ data: digest.Bytes("nsjail-config-file", []byte(nsjailCfg)),
+ },
+ }
+ case wrapperBindMount:
+ logger.Infof("run with bind mount")
+ envs = append(envs, fmt.Sprintf("INPUT_ROOT_DIR=%s", r.tree.RootDir()))
+ // needed for nsjail (bind mount).
+ // https://cloud.google.com/remote-build-execution/docs/remote-execution-properties#container_properties
+ // dockerAddCapabilities=SYS_ADMIN is not sufficient.
+ // need -security-opt=apparmor:unconfined too?
+ // https://github.com/moby/moby/issues/16429
+ r.addPlatformProperty(ctx, "dockerPrivileged", "true")
+ // needed for mkdir $INPUT_ROOT_DIR
+ r.addPlatformProperty(ctx, "dockerRunAsRoot", "true")
+ for _, e := range r.gomaReq.Env {
+ envs = append(envs, e)
+ }
+ files = []fileDesc{
+ {
+ name: posixWrapperName,
+ data: digest.Bytes("nsjail-bind-mount-wrapper-script", []byte(bindMountWrapperScript)),
+ isExecutable: true,
+ },
+ }
+ case wrapperCwdAgnostic:
+ logger.Infof("run with chdir: cwd agnostic")
+ for _, e := range r.gomaReq.Env {
+ if strings.HasPrefix(e, "PWD=") {
+ // PWD is usually absolute path.
+ // if cwd agnostic, then we should remove
+ // PWD environment variable.
+ continue
+ }
+ envs = append(envs, e)
+ }
+ files = []fileDesc{
+ {
+ name: posixWrapperName,
+ data: digest.Bytes("cwd-agnostic-wrapper-script", []byte(cwdAgnosticWrapperScript)),
+ isExecutable: true,
+ },
+ }
+ case wrapperWin:
+ logger.Infof("run on win")
wn, data, err := wrapperForWindows(ctx)
if err != nil {
return err
@@ -705,9 +799,8 @@
isExecutable: true,
},
}
-
default:
- return fmt.Errorf("bad path type: %v", pathType)
+ return fmt.Errorf("bad wrapper type: %v", wt)
}
// Only the first one is called in the command line via storing
@@ -739,6 +832,11 @@
wrapperPath = "./" + posixWrapperName
}
r.args = append([]string{wrapperPath}, args...)
+
+ err = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(wrapperTypeKey, wt.String())}, wrapperCount.M(1))
+ if err != nil {
+ logger.Errorf("record wrapper-count %s: %v", wt, err)
+ }
return nil
}
@@ -775,8 +873,13 @@
return gccCwdAgnostic(filepath, args, envs)
case "clang-cl":
return clangclCwdAgnostic(args, envs)
+ case "javac":
+ // Currently, javac in Chromium is fully cwd agnostic. Simpler just to
+ // support only the cwd agnostic case and let it fail if the client passed
+ // in invalid absolute paths.
+ return nil
default:
- // "cl.exe", "javac", "clang-tidy"
+ // "cl.exe", "clang-tidy"
return fmt.Errorf("no cwd agnostic check for %s", name)
}
}
@@ -922,9 +1025,9 @@
return resp, true
}
-func (r *request) missingBlobs(ctx context.Context) []*rpb.Digest {
+func (r *request) missingBlobs(ctx context.Context) ([]*rpb.Digest, error) {
if r.err != nil {
- return nil
+ return nil, r.err
}
var blobs []*rpb.Digest
err := rpc.Retry{}.Do(ctx, func() error {
@@ -934,9 +1037,9 @@
})
if err != nil {
r.err = err
- return nil
+ return nil, err
}
- return blobs
+ return blobs, nil
}
func inputForDigest(ds *digest.Store, d *rpb.Digest) (string, error) {
@@ -1006,11 +1109,11 @@
}
}
-func (r *request) uploadBlobs(ctx context.Context, blobs []*rpb.Digest) *gomapb.ExecResp {
+func (r *request) uploadBlobs(ctx context.Context, blobs []*rpb.Digest) (*gomapb.ExecResp, error) {
if r.err != nil {
- return nil
+ return nil, r.err
}
- err := r.cas.Upload(ctx, r.instanceName(), blobs...)
+ err := r.cas.Upload(ctx, r.instanceName(), r.f.CASBlobLookupSema, blobs...)
if err != nil {
if missing, ok := err.(cas.MissingError); ok {
logger := log.FromContext(ctx)
@@ -1031,7 +1134,7 @@
r.gomaResp.MissingReason = missingReason
sortMissing(r.gomaReq.Input, r.gomaResp)
logFileList(logger, "missing inputs", r.gomaResp.MissingInput)
- return r.gomaResp
+ return r.gomaResp, nil
}
// failed to upload non-input, so no need to report
// missing input to users.
@@ -1039,7 +1142,7 @@
}
r.err = err
}
- return nil
+ return nil, err
}
func (r *request) executeAction(ctx context.Context) (*rpb.ExecuteResponse, error) {
diff --git a/remoteexec/exec_test.go b/remoteexec/exec_test.go
index b9b6a9f..acfb868 100644
--- a/remoteexec/exec_test.go
+++ b/remoteexec/exec_test.go
@@ -9,9 +9,12 @@
"context"
"errors"
"fmt"
+ "sort"
+ "sync"
"testing"
"github.com/golang/protobuf/proto"
+ "github.com/google/go-cmp/cmp"
"go.chromium.org/goma/server/hash"
"go.chromium.org/goma/server/log"
@@ -170,11 +173,16 @@
}
type fakeGomaInput struct {
- digests map[*gomapb.ExecReq_Input]digest.Data
- hashes map[*gomapb.FileBlob]string
+ mu sync.Mutex
+ digests map[*gomapb.ExecReq_Input]digest.Data
+ hashes map[*gomapb.FileBlob]string
+ uploaded []string
+ numUploads int
}
func (f *fakeGomaInput) setInputs(inputs []*gomapb.ExecReq_Input) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
if f.digests == nil {
f.digests = make(map[*gomapb.ExecReq_Input]digest.Data)
}
@@ -182,7 +190,7 @@
f.hashes = make(map[*gomapb.FileBlob]string)
}
for _, input := range inputs {
- f.digests[input] = digest.Bytes(input.GetFilename(), input.Content.Content)
+ f.digests[input] = digest.Bytes(input.GetFilename(), input.Content.GetContent())
f.hashes[input.Content] = input.GetHashKey()
}
}
@@ -195,12 +203,271 @@
return d, nil
}
-func (f *fakeGomaInput) upload(ctx context.Context, blob *gomapb.FileBlob) (string, error) {
- h, ok := f.hashes[blob]
- if !ok {
- return "", errors.New("upload error")
+func (f *fakeGomaInput) upload(ctx context.Context, blobs []*gomapb.FileBlob) ([]string, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ hashes := make([]string, 0, len(blobs))
+ for _, blob := range blobs {
+ h, ok := f.hashes[blob]
+ if !ok {
+ return nil, errors.New("upload error")
+ }
+ f.uploaded = append(f.uploaded, h)
+ f.numUploads++
+ hashes = append(hashes, h)
}
- return h, nil
+ return hashes, nil
+}
+
+func makeInput(tb testing.TB, content, filename string) *gomapb.ExecReq_Input {
+ tb.Helper()
+ blob := &gomapb.FileBlob{
+ BlobType: gomapb.FileBlob_FILE.Enum(),
+ Content: []byte(content),
+ FileSize: proto.Int64(int64(len(content))),
+ }
+ hashkey, err := hash.SHA256Proto(blob)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ return &gomapb.ExecReq_Input{
+ HashKey: proto.String(hashkey),
+ Filename: proto.String(filename),
+ Content: blob,
+ }
+}
+
+func TestUploadInputFiles(t *testing.T) {
+ sema := make(chan struct{}, 3)
+
+ inputs := make([]*gomapb.ExecReq_Input, 6)
+ for i := range inputs {
+ inputs[i] = makeInput(t, fmt.Sprintf("content %d", i), fmt.Sprintf("input_%d", i))
+ }
+
+ manyInputs := make([]*gomapb.ExecReq_Input, 1000)
+ for i := range manyInputs {
+ manyInputs[i] = makeInput(t, fmt.Sprintf("content %d", i), fmt.Sprintf("input_%d", i))
+ }
+
+ makeHashKeys := func(inputs []*gomapb.ExecReq_Input) []string {
+ result := make([]string, len(inputs))
+ for i, input := range inputs {
+ result[i] = input.GetHashKey()
+ }
+ return result
+ }
+
+ for _, tc := range []struct {
+ desc string
+ stored []*gomapb.ExecReq_Input
+ inputs []*gomapb.ExecReq_Input
+ wantUploaded []string
+ wantNumUploads int
+ wantErr bool
+ }{
+ {
+ desc: "uploads",
+ stored: inputs,
+ inputs: inputs,
+ wantUploaded: makeHashKeys(inputs),
+ wantNumUploads: len(inputs),
+ }, {
+ desc: "many uploads",
+ stored: manyInputs,
+ inputs: manyInputs,
+ wantUploaded: makeHashKeys(manyInputs),
+ wantNumUploads: len(manyInputs),
+ }, {
+ desc: "all errors",
+ inputs: inputs,
+ wantErr: true,
+ }, {
+ desc: "partial errors",
+ stored: inputs[:3],
+ inputs: inputs,
+ wantUploaded: makeHashKeys(inputs[:3]),
+ wantNumUploads: len(inputs[:3]),
+ wantErr: true,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ gi := &fakeGomaInput{}
+ ctx := context.Background()
+
+ gi.setInputs(tc.stored)
+
+ err := uploadInputFiles(ctx, tc.inputs, gi, sema)
+
+ sort.Strings(gi.uploaded)
+ sort.Strings(tc.wantUploaded)
+ if !cmp.Equal(gi.uploaded, tc.wantUploaded) {
+ t.Errorf("gi.uploaded -want +got: %s", cmp.Diff(tc.wantUploaded, gi.uploaded))
+ }
+ if gi.numUploads != tc.wantNumUploads {
+ t.Errorf("numUploads=%d; want %d", gi.numUploads, tc.wantNumUploads)
+ }
+ if (err != nil) != tc.wantErr {
+ t.Errorf("err=%v; wantErr=%t", err, tc.wantErr)
+ }
+ })
+ }
+}
+
+func TestInputFiles(t *testing.T) {
+ sema := make(chan struct{}, 3)
+
+ // These are minimal function / map that are not being tested.
+ rootRel := func(filename string) (string, error) { return filename, nil }
+ executableInputs := map[string]bool{}
+
+ inputs := make([]*gomapb.ExecReq_Input, 6)
+ inputsNoContent := make([]*gomapb.ExecReq_Input, len(inputs))
+ for i := range inputs {
+ input := makeInput(t, fmt.Sprintf("content %d", i), fmt.Sprintf("input_%d", i))
+ inputs[i] = input
+ inputsNoContent[i] = &gomapb.ExecReq_Input{
+ HashKey: input.HashKey,
+ Filename: input.Filename,
+ }
+ }
+
+ manyInputs := make([]*gomapb.ExecReq_Input, 1000)
+ for i := range manyInputs {
+ manyInputs[i] = makeInput(t, fmt.Sprintf("content %d", i), fmt.Sprintf("input_%d", i))
+ }
+
+ makeMissing := func(input *gomapb.ExecReq_Input) inputFileResult {
+ return inputFileResult{
+ missingInput: input.GetFilename(),
+ missingReason: "input: not found",
+ }
+ }
+ makeFound := func(input *gomapb.ExecReq_Input) inputFileResult {
+ return inputFileResult{
+ file: merkletree.Entry{
+ Name: input.GetFilename(), // Because `rootRel()` is an identity function
+ Data: inputDigestData{
+ filename: input.GetFilename(),
+ Data: digest.Bytes(input.GetFilename(), input.Content.GetContent()),
+ },
+ },
+ uploaded: input.Content != nil,
+ }
+ }
+
+ for _, tc := range []struct {
+ desc string
+ stored []*gomapb.ExecReq_Input
+ inputs []*gomapb.ExecReq_Input
+ wantResult []inputFileResult
+ }{
+ {
+ desc: "all missing",
+ inputs: inputsNoContent,
+ wantResult: []inputFileResult{
+ makeMissing(inputs[0]),
+ makeMissing(inputs[1]),
+ makeMissing(inputs[2]),
+ makeMissing(inputs[3]),
+ makeMissing(inputs[4]),
+ makeMissing(inputs[5]),
+ },
+ }, {
+ desc: "new content but not stored",
+ inputs: inputs,
+ wantResult: []inputFileResult{
+ makeMissing(inputs[0]),
+ makeMissing(inputs[1]),
+ makeMissing(inputs[2]),
+ makeMissing(inputs[3]),
+ makeMissing(inputs[4]),
+ makeMissing(inputs[5]),
+ },
+ }, {
+ desc: "all stored no content",
+ stored: inputsNoContent,
+ inputs: inputsNoContent,
+ wantResult: []inputFileResult{
+ makeFound(inputsNoContent[0]),
+ makeFound(inputsNoContent[1]),
+ makeFound(inputsNoContent[2]),
+ makeFound(inputsNoContent[3]),
+ makeFound(inputsNoContent[4]),
+ makeFound(inputsNoContent[5]),
+ },
+ }, {
+ desc: "all uploaded",
+ stored: inputs,
+ inputs: inputs,
+ wantResult: []inputFileResult{
+ makeFound(inputs[0]),
+ makeFound(inputs[1]),
+ makeFound(inputs[2]),
+ makeFound(inputs[3]),
+ makeFound(inputs[4]),
+ makeFound(inputs[5]),
+ },
+ },
+ {
+ desc: "mixed",
+ stored: []*gomapb.ExecReq_Input{
+ inputs[0],
+ inputsNoContent[1],
+ inputs[3],
+ inputsNoContent[4],
+ },
+ inputs: []*gomapb.ExecReq_Input{
+ inputs[0],
+ inputsNoContent[1],
+ inputs[2],
+ inputs[3],
+ inputsNoContent[4],
+ inputsNoContent[5],
+ },
+ wantResult: []inputFileResult{
+ makeFound(inputs[0]),
+ makeFound(inputsNoContent[1]),
+ makeMissing(inputs[2]),
+ makeFound(inputs[3]),
+ makeFound(inputsNoContent[4]),
+ makeMissing(inputs[5]),
+ },
+ }, {
+ desc: "many uploads",
+ stored: manyInputs,
+ inputs: manyInputs,
+ wantResult: func() []inputFileResult {
+ result := make([]inputFileResult, len(manyInputs))
+ for i, input := range manyInputs {
+ result[i] = makeFound(input)
+ }
+ return result
+ }(),
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ gi := &fakeGomaInput{}
+ gi.setInputs(tc.stored)
+ ctx := context.Background()
+
+ results := inputFiles(ctx, tc.inputs, gi, rootRel, executableInputs, sema)
+
+ digestDataComparer := cmp.Comparer(func(x, y digest.Data) bool {
+ if x == nil && y == nil {
+ return true
+ }
+ if x == nil || y == nil {
+ return false
+ }
+ return proto.Equal(x.Digest(), y.Digest()) && x.String() == y.String()
+ })
+
+ if !cmp.Equal(results, tc.wantResult, cmp.AllowUnexported(inputFileResult{}), cmp.AllowUnexported(inputDigestData{}), digestDataComparer) {
+ t.Errorf("results=%v; want %v", results, tc.wantResult)
+ }
+ })
+ }
}
type nopLogger struct{}
@@ -220,21 +487,7 @@
func BenchmarkInputFiles(b *testing.B) {
var inputs []*gomapb.ExecReq_Input
for i := 0; i < 1000; i++ {
- content := fmt.Sprintf("content %d", i)
- blob := &gomapb.FileBlob{
- BlobType: gomapb.FileBlob_FILE.Enum(),
- Content: []byte(content),
- FileSize: proto.Int64(int64(len(content))),
- }
- hashkey, err := hash.SHA256Proto(blob)
- if err != nil {
- b.Fatal(err)
- }
- inputs = append(inputs, &gomapb.ExecReq_Input{
- HashKey: proto.String(hashkey),
- Filename: proto.String(fmt.Sprintf("input_%d", i)),
- Content: blob,
- })
+ inputs = append(inputs, makeInput(b, fmt.Sprintf("content %d", i), fmt.Sprintf("input_%d", i)))
}
gi := &fakeGomaInput{}
gi.setInputs(inputs)
diff --git a/remoteexec/fake_cluster_for_test.go b/remoteexec/fake_cluster_for_test.go
index 8c48e27..317c9cc 100644
--- a/remoteexec/fake_cluster_for_test.go
+++ b/remoteexec/fake_cluster_for_test.go
@@ -172,14 +172,15 @@
defers = append(defers, func() { f.fconn.Close() })
f.adapter = Adapter{
- InstancePrefix: instancePrefix,
- ExecTimeout: 10 * time.Second,
- Client: Client{ClientConn: f.conn},
- GomaFile: fpb.NewFileServiceClient(f.fconn),
- DigestCache: digest.NewCache(&f.redis),
- CmdStorage: &f.cmdStorage,
- ToolDetails: &rpb.ToolDetails{},
- FileLookupConcurrency: 2,
+ InstancePrefix: instancePrefix,
+ ExecTimeout: 10 * time.Second,
+ Client: Client{ClientConn: f.conn},
+ GomaFile: fpb.NewFileServiceClient(f.fconn),
+ DigestCache: digest.NewCache(&f.redis),
+ CmdStorage: &f.cmdStorage,
+ ToolDetails: &rpb.ToolDetails{},
+ FileLookupSema: make(chan struct{}, 2),
+ CASBlobLookupSema: make(chan struct{}, 2),
}
defers = nil
diff --git a/remoteexec/gcc.go b/remoteexec/gcc.go
index ee20cf5..ca36e65 100644
--- a/remoteexec/gcc.go
+++ b/remoteexec/gcc.go
@@ -44,6 +44,20 @@
var subCmd string
for _, arg := range args {
switch {
+ case arg == "-fdebug-compilation-dir":
+ // We can stop checking the rest of the flags. When seeing
+ // "-fdebug-compilation-dir", we expect the result to be CWD agnostic.
+ //
+ // Note that this check applies to both GCC and Clang and returns nil
+ // immediately for the following cases:
+ // -xx -fdebug-compilation-dir . -yy ... <- GCC flag
+ // -xx -XClang -fdebug-compilation-dir -XClang . -yy ... <- Clang flag
+ //
+ // As a result, clangArgCwdAgnostic() doesn't need to check this again.
+ if subCmd == "" || subCmd == "clang" {
+ return nil
+ }
+ return errors.New("fdebug-compilation-dir not supported for " + subCmd)
case pathFlag:
if filepath.IsAbs(arg) {
return fmt.Errorf("abs path: %s", arg)
diff --git a/remoteexec/gcc_test.go b/remoteexec/gcc_test.go
index 35b388e..c1731e3 100644
--- a/remoteexec/gcc_test.go
+++ b/remoteexec/gcc_test.go
@@ -191,6 +191,90 @@
}
}
+func TestGccCwdAgnosticForDebugCompilationDir(t *testing.T) {
+ // Tests for supporting "-fdebug-compilation-dir", see b/135719929.
+ // We could have merged the cases here into TestGccCwdAgnostic, but decided
+ // to separate them for clarity.
+
+ // Do not set "-g*" options in baseReleaseArgs!
+ baseReleaseArgs := []string{
+ "../../third_party/llvm-build/Release+Asserts/bin/clang++",
+ "../../base/time/time.cc",
+ }
+
+ // Since "-fdebug-compilation-dir" has been moved to clang driver flags in
+ // https://reviews.llvm.org/D63387, we set cases both with and w/o "-Xclang"
+ for _, tc := range []struct {
+ desc string
+ args []string
+ envs []string
+ cwdAgnostic bool
+ }{
+ {
+ desc: "basic",
+ args: append(append([]string{}, baseReleaseArgs...),
+ "-fdebug-compilation-dir",
+ "."),
+ cwdAgnostic: true,
+ },
+ {
+ desc: "-Xclang",
+ args: append(append([]string{}, baseReleaseArgs...),
+ "-Xclang",
+ "-fdebug-compilation-dir",
+ "-Xclang",
+ "."),
+ cwdAgnostic: true,
+ },
+ {
+ desc: "With -g* DBG options",
+ args: append(append([]string{}, baseReleaseArgs...),
+ "-g2",
+ "-gsplit-dwarf",
+ "-fdebug-compilation-dir",
+ "."),
+ cwdAgnostic: true,
+ },
+ {
+ desc: "-Xclang with -g* DBG option",
+ args: append(append([]string{}, baseReleaseArgs...),
+ "-g2",
+ "-gsplit-dwarf",
+ "-Xclang",
+ "-fdebug-compilation-dir",
+ "-Xclang",
+ "."),
+ cwdAgnostic: true,
+ },
+ {
+ // Make sure the CWD agnostic still returns false if
+ // "-fdebug-compilation-dir" is not specified.
+ desc: "Only -g* DBG options",
+ args: append(append([]string{}, baseReleaseArgs...),
+ "-g2",
+ "-gsplit-dwarf"),
+ cwdAgnostic: false,
+ },
+ {
+ // "-fdebug-compilation-dir" is not supported as LLVM flags.
+ desc: "No LLVM",
+ args: append(append([]string{}, baseReleaseArgs...),
+ "-mllvm",
+ "-fdebug-compilation-dir",
+ "-mllvm",
+ "."),
+ cwdAgnostic: false,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ err := gccCwdAgnostic(posixpath.FilePath{}, tc.args, tc.envs)
+ if (err == nil) != tc.cwdAgnostic {
+ t.Errorf("gccCwdAgnostic(posixpath.FilePath, args, envs)=%v; cwdAgnostic=%t", err, tc.cwdAgnostic)
+ }
+ })
+ }
+}
+
func TestGccOutputs(t *testing.T) {
for _, tc := range []struct {
desc string
diff --git a/remoteexec/gomainput.go b/remoteexec/gomainput.go
index 21f4b07..a2a2287 100644
--- a/remoteexec/gomainput.go
+++ b/remoteexec/gomainput.go
@@ -57,22 +57,30 @@
return gi.digestCache.Get(ctx, hashKey, src)
}
-func (gi gomaInput) upload(ctx context.Context, content *gomapb.FileBlob) (string, error) {
- if content == nil {
- return "", status.Error(codes.FailedPrecondition, "upload: contents must not be nil.")
+func (gi gomaInput) upload(ctx context.Context, content []*gomapb.FileBlob) ([]string, error) {
+ if len(content) == 0 {
+ return nil, status.Error(codes.FailedPrecondition, "upload: contents must not be empty.")
+ }
+ for _, c := range content {
+ if c == nil {
+ return nil, status.Error(codes.FailedPrecondition, "upload: contents must not be nil.")
+ }
}
resp, err := gi.gomaFile.StoreFile(ctx, &gomapb.StoreFileReq{
- Blob: []*gomapb.FileBlob{
- content,
- },
+ Blob: content,
})
if err != nil {
- return "", err
+ return nil, err
}
- if len(resp.HashKey) == 0 || resp.HashKey[0] == "" {
- return "", status.Errorf(codes.Internal, "file.StoreFile: failed to set content")
+ if len(resp.HashKey) != len(content) {
+ return nil, status.Errorf(codes.Internal, "file.StoreFile: failed to set content: %d hashes returned, expected %d", len(resp.HashKey), len(content))
}
- return resp.HashKey[0], nil
+ for _, hk := range resp.HashKey {
+ if hk == "" {
+ return nil, status.Errorf(codes.Internal, "file.StoreFile: failed to set content")
+ }
+ }
+ return resp.HashKey, nil
}
func lookup(ctx context.Context, c lookupClient, hashKey string) (*gomapb.FileBlob, error) {
diff --git a/remoteexec/gomainput_test.go b/remoteexec/gomainput_test.go
index beb60de..80fb9a4 100644
--- a/remoteexec/gomainput_test.go
+++ b/remoteexec/gomainput_test.go
@@ -30,15 +30,19 @@
digestCache: cluster.adapter.DigestCache,
}
- hk, err := gi.upload(ctx, &gomapb.FileBlob{
- BlobType: gomapb.FileBlob_FILE.Enum(),
- Content: []byte("dummy"),
+ hks, err := gi.upload(ctx, []*gomapb.FileBlob{
+ {
+ BlobType: gomapb.FileBlob_FILE.Enum(),
+ Content: []byte("dummy"),
+ },
})
if err != nil {
t.Errorf("gi.upload err=%v; want nil", err)
}
- if hk == "" {
- t.Errorf("gi.upload returns hk=empty string; want non empty")
+ for _, hk := range hks {
+ if hk == "" {
+ t.Errorf("gi.upload returns hk=empty string; want non empty")
+ }
}
}
diff --git a/remoteexec/merkletree/merkletree.go b/remoteexec/merkletree/merkletree.go
index b3cc46f..89d4fd1 100644
--- a/remoteexec/merkletree/merkletree.go
+++ b/remoteexec/merkletree/merkletree.go
@@ -211,22 +211,6 @@
// buildtree builds tree at curdir, which is located as dirname.
func (m *MerkleTree) buildTree(ctx context.Context, curdir *rpb.Directory, dirname string) (*rpb.Digest, error) {
logger := log.FromContext(ctx)
- // FIXME: this is workaround for b/71495874
- if len(curdir.Files) == 0 && len(curdir.Symlinks) == 0 && len(curdir.Directories) == 0 {
- // empty dir.
- // foundry doesn't create empty directory.
- // http://b/80406381
- // but, there is case that empty directory must exist
- // http://b/80279190
- // to workaround this put dummy file to make sure empty dir
- // is created.
- emptyFile := digest.Bytes("empty file", nil)
- m.store.Set(emptyFile)
- curdir.Files = append(curdir.Files, &rpb.FileNode{
- Name: ".keep_me",
- Digest: emptyFile.Digest(),
- })
- }
// directory should not have duplicate name.
// http://b/124693412
names := map[string]proto.Message{}
@@ -258,7 +242,7 @@
if !proto.Equal(s, p) {
return nil, fmt.Errorf("duplicate symlink %s in %s: %s != %s", s.Name, dirname, s, p)
}
- logger.Errorf("duplicate symlink %s in %s: %s", s.Name, dirname, s)
+ logger.Infof("duplicate symlink %s in %s: %s", s.Name, dirname, s)
continue
}
names[s.Name] = s
@@ -287,7 +271,7 @@
if !proto.Equal(subdir, p) {
return nil, fmt.Errorf("duplicate dir %s in %s: %s != %s", subdir.Name, dirname, subdir, p)
}
- logger.Errorf("duplicate dir %s in %s: %s", subdir.Name, dirname, subdir)
+ logger.Infof("duplicate dir %s in %s: %s", subdir.Name, dirname, subdir)
continue
}
names[subdir.Name] = subdir
diff --git a/remoteexec/merkletree/merkletree_test.go b/remoteexec/merkletree/merkletree_test.go
index 793e2ac..d7dbb15 100644
--- a/remoteexec/merkletree/merkletree_test.go
+++ b/remoteexec/merkletree/merkletree_test.go
@@ -378,17 +378,13 @@
checkDir(ctx, t, ds, baseDir, "debug",
[]string{"debugger.cc"},
nil, nil)
- // empty dir will have .keep_me file.
- // TODO: remove this when b/71495874 is fixed.
checkDir(ctx, t, ds, baseDir, "test",
- []string{".keep_me"},
- nil, nil)
+ nil, nil, nil)
outDir := checkDir(ctx, t, ds, dir, "out", nil, []string{"Release"}, nil)
releaseDir := checkDir(ctx, t, ds, outDir, "Release", nil, []string{"obj"}, nil)
objDir := checkDir(ctx, t, ds, releaseDir, "obj", nil, []string{"base"}, nil)
- // TODO: make nil instead of []string{".keep_me"} when b/71495874 is fixed.
- checkDir(ctx, t, ds, objDir, "base", []string{".keep_me"}, nil, nil)
+ checkDir(ctx, t, ds, objDir, "base", nil, nil, nil)
tpDir := checkDir(ctx, t, ds, dir, "third_party", nil, []string{"llvm-build", "skia"}, nil)
llvmDir := checkDir(ctx, t, ds, tpDir, "llvm-build", nil, []string{"Release+Asserts"}, nil)
diff --git a/remoteexec/stats.go b/remoteexec/stats.go
new file mode 100644
index 0000000..cc3a0b2
--- /dev/null
+++ b/remoteexec/stats.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Goma Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package remoteexec
+
+import (
+ "context"
+
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+)
+
+var (
+ numRunningOperations = stats.Int64(
+ "go.chromium.org/goma/server/remoteexec.running-operations",
+ "Number of current running exec operations",
+ stats.UnitDimensionless)
+
+ wrapperCount = stats.Int64(
+ "go.chromium.org/goma/server/remoteexec.wrapper-counts",
+ "Number of requests per wrapper types",
+ stats.UnitDimensionless)
+
+ wrapperTypeKey = tag.MustNewKey("wrapper")
+
+ DefaultViews = []*view.View{
+ {
+ Description: `Number of current running exec operations`,
+ Measure: numRunningOperations,
+ Aggregation: view.Sum(),
+ },
+ {
+ Description: "Number of requests per wrapper types",
+ TagKeys: []tag.Key{
+ wrapperTypeKey,
+ },
+ Measure: wrapperCount,
+ Aggregation: view.Count(),
+ },
+ }
+)
+
+func recordRemoteExecStart(ctx context.Context) {
+ stats.Record(ctx, numRunningOperations.M(1))
+}
+
+func recordRemoteExecFinish(ctx context.Context) {
+ stats.Record(ctx, numRunningOperations.M(-1))
+}
diff --git a/server/limited_sampler.go b/server/limited_sampler.go
index 06d20a2..5b41d23 100644
--- a/server/limited_sampler.go
+++ b/server/limited_sampler.go
@@ -11,6 +11,17 @@
"go.opencensus.io/trace"
)
+const (
+ // same as default sampler
+ // https://github.com/census-instrumentation/opencensus-go/blob/master/trace/sampling.go#L21
+ DefaultTraceFraction = 1e-4
+
+ // trace API limit is 4800/minutes.
+ // https://cloud.google.com/trace/docs/quotas#trace-api-limit
+ // 4800/60/(total number of replicas in the project)
+ DefaultTraceQPS = 0.05
+)
+
type limitedSampler struct {
sampler trace.Sampler
sampleDuration time.Duration
diff --git a/server/remote_sampler.go b/server/remote_sampler.go
deleted file mode 100644
index 330e8d3..0000000
--- a/server/remote_sampler.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The Goma Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package server
-
-import "go.opencensus.io/trace"
-
-type remoteSampler struct {
- sampler trace.Sampler
-}
-
-func (rs remoteSampler) Sample(p trace.SamplingParameters) trace.SamplingDecision {
- if p.HasRemoteParent {
- return trace.SamplingDecision{
- Sample: true,
- }
- }
- return rs.sampler(p)
-}
-
-// NewRemoteSampler returns trace sampler to sample if remote is sampled
-// if remoteSampled is true.
-// if remoteSampled is false or remote is not sampled, use sampler.
-func NewRemoteSampler(remoteSampled bool, sampler trace.Sampler) trace.Sampler {
- if remoteSampled {
- return remoteSampler{
- sampler: sampler,
- }.Sample
- }
- return sampler
-}