| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/dataproc/v1beta2/jobs.proto |
| |
| package dataproc // import "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2" |
| |
| import proto "github.com/golang/protobuf/proto" |
| import fmt "fmt" |
| import math "math" |
| import empty "github.com/golang/protobuf/ptypes/empty" |
| import timestamp "github.com/golang/protobuf/ptypes/timestamp" |
| import _ "google.golang.org/genproto/googleapis/api/annotations" |
| import field_mask "google.golang.org/genproto/protobuf/field_mask" |
| |
| import ( |
| context "golang.org/x/net/context" |
| grpc "google.golang.org/grpc" |
| ) |
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ = proto.Marshal |
| var _ = fmt.Errorf |
| var _ = math.Inf |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the proto package it is being compiled against. |
| // A compilation error at this line likely means your copy of the |
| // proto package needs to be updated. |
| const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package |
| |
// The Log4j level for job execution. When running an
// [Apache Hive](http://hive.apache.org/) job, Cloud
// Dataproc configures the Hive client to an equivalent verbosity level.
type LoggingConfig_Level int32

const (
	// Level is unspecified. Use default level for log4j.
	LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
	// Use ALL level for log4j.
	LoggingConfig_ALL LoggingConfig_Level = 1
	// Use TRACE level for log4j.
	LoggingConfig_TRACE LoggingConfig_Level = 2
	// Use DEBUG level for log4j.
	LoggingConfig_DEBUG LoggingConfig_Level = 3
	// Use INFO level for log4j.
	LoggingConfig_INFO LoggingConfig_Level = 4
	// Use WARN level for log4j.
	LoggingConfig_WARN LoggingConfig_Level = 5
	// Use ERROR level for log4j.
	LoggingConfig_ERROR LoggingConfig_Level = 6
	// Use FATAL level for log4j.
	LoggingConfig_FATAL LoggingConfig_Level = 7
	// Turn off log4j.
	LoggingConfig_OFF LoggingConfig_Level = 8
)

// LoggingConfig_Level_name maps enum wire values to their proto names.
var LoggingConfig_Level_name = map[int32]string{
	0: "LEVEL_UNSPECIFIED",
	1: "ALL",
	2: "TRACE",
	3: "DEBUG",
	4: "INFO",
	5: "WARN",
	6: "ERROR",
	7: "FATAL",
	8: "OFF",
}

// LoggingConfig_Level_value is the inverse of LoggingConfig_Level_name,
// mapping proto names back to enum wire values.
var LoggingConfig_Level_value = map[string]int32{
	"LEVEL_UNSPECIFIED": 0,
	"ALL":               1,
	"TRACE":             2,
	"DEBUG":             3,
	"INFO":              4,
	"WARN":              5,
	"ERROR":             6,
	"FATAL":             7,
	"OFF":               8,
}

// String returns the proto name of the enum value (e.g. "DEBUG").
func (x LoggingConfig_Level) String() string {
	return proto.EnumName(LoggingConfig_Level_name, int32(x))
}

// EnumDescriptor returns the serialized jobs.proto file descriptor and the
// index path ([]int{0, 0}) locating this enum within it.
func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{0, 0}
}
| |
// The job state.
type JobStatus_State int32

const (
	// The job state is unknown.
	JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
	// The job is pending; it has been submitted, but is not yet running.
	JobStatus_PENDING JobStatus_State = 1
	// Job has been received by the service and completed initial setup;
	// it will soon be submitted to the cluster.
	JobStatus_SETUP_DONE JobStatus_State = 8
	// The job is running on the cluster.
	JobStatus_RUNNING JobStatus_State = 2
	// A CancelJob request has been received, but is pending.
	JobStatus_CANCEL_PENDING JobStatus_State = 3
	// Transient in-flight resources have been canceled, and the request to
	// cancel the running job has been issued to the cluster.
	JobStatus_CANCEL_STARTED JobStatus_State = 7
	// The job cancellation was successful.
	JobStatus_CANCELLED JobStatus_State = 4
	// The job has completed successfully.
	JobStatus_DONE JobStatus_State = 5
	// The job has completed, but encountered an error.
	JobStatus_ERROR JobStatus_State = 6
	// Job attempt has failed. The detail field contains failure details for
	// this attempt.
	//
	// Applies to restartable jobs only.
	JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)

// JobStatus_State_name maps enum wire values to their proto names.
// NOTE: the numeric values are not in declaration order (e.g. SETUP_DONE=8)
// because fields were added to the proto over time.
var JobStatus_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "PENDING",
	8: "SETUP_DONE",
	2: "RUNNING",
	3: "CANCEL_PENDING",
	7: "CANCEL_STARTED",
	4: "CANCELLED",
	5: "DONE",
	6: "ERROR",
	9: "ATTEMPT_FAILURE",
}

// JobStatus_State_value is the inverse of JobStatus_State_name.
var JobStatus_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"PENDING":           1,
	"SETUP_DONE":        8,
	"RUNNING":           2,
	"CANCEL_PENDING":    3,
	"CANCEL_STARTED":    7,
	"CANCELLED":         4,
	"DONE":              5,
	"ERROR":             6,
	"ATTEMPT_FAILURE":   9,
}

// String returns the proto name of the enum value.
func (x JobStatus_State) String() string {
	return proto.EnumName(JobStatus_State_name, int32(x))
}

// EnumDescriptor returns the serialized jobs.proto file descriptor and the
// index path ([]int{9, 0}) locating this enum within it.
func (JobStatus_State) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{9, 0}
}
| |
// The job substate.
type JobStatus_Substate int32

const (
	// The job substate is unknown.
	JobStatus_UNSPECIFIED JobStatus_Substate = 0
	// The Job is submitted to the agent.
	//
	// Applies to RUNNING state.
	JobStatus_SUBMITTED JobStatus_Substate = 1
	// The Job has been received and is awaiting execution (it may be waiting
	// for a condition to be met). See the "details" field for the reason for
	// the delay.
	//
	// Applies to RUNNING state.
	JobStatus_QUEUED JobStatus_Substate = 2
	// The agent-reported status is out of date, which may be caused by a
	// loss of communication between the agent and Cloud Dataproc. If the
	// agent does not send a timely update, the job will fail.
	//
	// Applies to RUNNING state.
	JobStatus_STALE_STATUS JobStatus_Substate = 3
)

// JobStatus_Substate_name maps enum wire values to their proto names.
var JobStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "SUBMITTED",
	2: "QUEUED",
	3: "STALE_STATUS",
}

// JobStatus_Substate_value is the inverse of JobStatus_Substate_name.
var JobStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"SUBMITTED":    1,
	"QUEUED":       2,
	"STALE_STATUS": 3,
}

// String returns the proto name of the enum value.
func (x JobStatus_Substate) String() string {
	return proto.EnumName(JobStatus_Substate_name, int32(x))
}

// EnumDescriptor returns the serialized jobs.proto file descriptor and the
// index path ([]int{9, 1}) locating this enum within it.
func (JobStatus_Substate) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{9, 1}
}
| |
// The application state, corresponding to
// <code>YarnProtos.YarnApplicationStateProto</code>.
type YarnApplication_State int32

const (
	// Status is unspecified.
	YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
	// Status is NEW.
	YarnApplication_NEW YarnApplication_State = 1
	// Status is NEW_SAVING.
	YarnApplication_NEW_SAVING YarnApplication_State = 2
	// Status is SUBMITTED.
	YarnApplication_SUBMITTED YarnApplication_State = 3
	// Status is ACCEPTED.
	YarnApplication_ACCEPTED YarnApplication_State = 4
	// Status is RUNNING.
	YarnApplication_RUNNING YarnApplication_State = 5
	// Status is FINISHED.
	YarnApplication_FINISHED YarnApplication_State = 6
	// Status is FAILED.
	YarnApplication_FAILED YarnApplication_State = 7
	// Status is KILLED.
	YarnApplication_KILLED YarnApplication_State = 8
)

// YarnApplication_State_name maps enum wire values to their proto names.
var YarnApplication_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "NEW",
	2: "NEW_SAVING",
	3: "SUBMITTED",
	4: "ACCEPTED",
	5: "RUNNING",
	6: "FINISHED",
	7: "FAILED",
	8: "KILLED",
}

// YarnApplication_State_value is the inverse of YarnApplication_State_name.
var YarnApplication_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"NEW":               1,
	"NEW_SAVING":        2,
	"SUBMITTED":         3,
	"ACCEPTED":          4,
	"RUNNING":           5,
	"FINISHED":          6,
	"FAILED":            7,
	"KILLED":            8,
}

// String returns the proto name of the enum value.
func (x YarnApplication_State) String() string {
	return proto.EnumName(YarnApplication_State_name, int32(x))
}

// EnumDescriptor returns the serialized jobs.proto file descriptor and the
// index path ([]int{11, 0}) locating this enum within it.
func (YarnApplication_State) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{11, 0}
}
| |
// A matcher that specifies categories of job states.
type ListJobsRequest_JobStateMatcher int32

const (
	// Match all jobs, regardless of state.
	ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
	// Only match jobs in non-terminal states: PENDING, RUNNING, or
	// CANCEL_PENDING.
	ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
	// Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
	ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)

// ListJobsRequest_JobStateMatcher_name maps enum wire values to their proto names.
var ListJobsRequest_JobStateMatcher_name = map[int32]string{
	0: "ALL",
	1: "ACTIVE",
	2: "NON_ACTIVE",
}

// ListJobsRequest_JobStateMatcher_value is the inverse of
// ListJobsRequest_JobStateMatcher_name.
var ListJobsRequest_JobStateMatcher_value = map[string]int32{
	"ALL":        0,
	"ACTIVE":     1,
	"NON_ACTIVE": 2,
}

// String returns the proto name of the enum value.
func (x ListJobsRequest_JobStateMatcher) String() string {
	return proto.EnumName(ListJobsRequest_JobStateMatcher_name, int32(x))
}

// EnumDescriptor returns the serialized jobs.proto file descriptor and the
// index path ([]int{16, 0}) locating this enum within it.
func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{16, 0}
}
| |
// The runtime logging config of the job.
type LoggingConfig struct {
	// The per-package log levels for the driver. This may include
	// "root" package name to configure rootLogger.
	// Examples:
	//   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]LoggingConfig_Level `protobuf:"bytes,2,rep,name=driver_log_levels,json=driverLogLevels,proto3" json:"driver_log_levels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=google.cloud.dataproc.v1beta2.LoggingConfig_Level"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, ProtoMessage, and Descriptor implement the proto.Message
// interface for LoggingConfig.
func (m *LoggingConfig) Reset()         { *m = LoggingConfig{} }
func (m *LoggingConfig) String() string { return proto.CompactTextString(m) }
func (*LoggingConfig) ProtoMessage()    {}
func (*LoggingConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{0}
}

// XXX_* methods delegate wire-format handling to the reflection-based
// message table xxx_messageInfo_LoggingConfig below.
func (m *LoggingConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LoggingConfig.Unmarshal(m, b)
}
func (m *LoggingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LoggingConfig.Marshal(b, m, deterministic)
}
func (dst *LoggingConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LoggingConfig.Merge(dst, src)
}
func (m *LoggingConfig) XXX_Size() int {
	return xxx_messageInfo_LoggingConfig.Size(m)
}
func (m *LoggingConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_LoggingConfig.DiscardUnknown(m)
}

// xxx_messageInfo_LoggingConfig caches the lazily-built marshal/unmarshal
// tables shared by the XXX_* methods above.
var xxx_messageInfo_LoggingConfig proto.InternalMessageInfo

// GetDriverLogLevels returns the per-package log level map, or nil if the
// receiver is nil (nil-safe, as for all generated getters).
func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level {
	if m != nil {
		return m.DriverLogLevels
	}
	return nil
}
| |
// A Cloud Dataproc job for running
// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
type HadoopJob struct {
	// Required. Indicates the location of the driver's main class. Specify
	// either the jar file that contains the main class or the main class name.
	// To specify both, add the jar file to `jar_file_uris`, and then specify
	// the main class name in this property.
	//
	// Types that are valid to be assigned to Driver:
	//	*HadoopJob_MainJarFileUri
	//	*HadoopJob_MainClass
	Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not
	// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
	// properties, since a collision may occur that causes an incorrect job
	// submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the
	// Hadoop driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
	// to the working directory of Hadoop drivers and distributed tasks. Useful
	// for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Hadoop drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Hadoop.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site and
	// classes in user code.
	Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

// Reset, String, ProtoMessage, and Descriptor implement the proto.Message
// interface for HadoopJob.
func (m *HadoopJob) Reset()         { *m = HadoopJob{} }
func (m *HadoopJob) String() string { return proto.CompactTextString(m) }
func (*HadoopJob) ProtoMessage()    {}
func (*HadoopJob) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{1}
}

// XXX_* methods delegate wire-format handling to the reflection-based
// message table xxx_messageInfo_HadoopJob below.
func (m *HadoopJob) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_HadoopJob.Unmarshal(m, b)
}
func (m *HadoopJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_HadoopJob.Marshal(b, m, deterministic)
}
func (dst *HadoopJob) XXX_Merge(src proto.Message) {
	xxx_messageInfo_HadoopJob.Merge(dst, src)
}
func (m *HadoopJob) XXX_Size() int {
	return xxx_messageInfo_HadoopJob.Size(m)
}
func (m *HadoopJob) XXX_DiscardUnknown() {
	xxx_messageInfo_HadoopJob.DiscardUnknown(m)
}

// xxx_messageInfo_HadoopJob caches the lazily-built marshal/unmarshal tables
// shared by the XXX_* methods above.
var xxx_messageInfo_HadoopJob proto.InternalMessageInfo

// isHadoopJob_Driver is satisfied exactly by the wrapper types that can
// populate the "driver" oneof field.
type isHadoopJob_Driver interface {
	isHadoopJob_Driver()
}

// HadoopJob_MainJarFileUri wraps the main_jar_file_uri case (field 1) of the
// "driver" oneof.
type HadoopJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

// HadoopJob_MainClass wraps the main_class case (field 2) of the "driver" oneof.
type HadoopJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

func (*HadoopJob_MainJarFileUri) isHadoopJob_Driver() {}

func (*HadoopJob_MainClass) isHadoopJob_Driver() {}

// GetDriver returns the populated "driver" oneof wrapper, or nil if unset or
// if the receiver is nil.
func (m *HadoopJob) GetDriver() isHadoopJob_Driver {
	if m != nil {
		return m.Driver
	}
	return nil
}

// GetMainJarFileUri returns the main jar URI when that oneof case is set;
// otherwise it returns "".
func (m *HadoopJob) GetMainJarFileUri() string {
	if x, ok := m.GetDriver().(*HadoopJob_MainJarFileUri); ok {
		return x.MainJarFileUri
	}
	return ""
}

// GetMainClass returns the main class name when that oneof case is set;
// otherwise it returns "".
func (m *HadoopJob) GetMainClass() string {
	if x, ok := m.GetDriver().(*HadoopJob_MainClass); ok {
		return x.MainClass
	}
	return ""
}

// GetArgs returns the driver arguments, or nil if the receiver is nil.
func (m *HadoopJob) GetArgs() []string {
	if m != nil {
		return m.Args
	}
	return nil
}

// GetJarFileUris returns the jar file URIs, or nil if the receiver is nil.
func (m *HadoopJob) GetJarFileUris() []string {
	if m != nil {
		return m.JarFileUris
	}
	return nil
}

// GetFileUris returns the file URIs, or nil if the receiver is nil.
func (m *HadoopJob) GetFileUris() []string {
	if m != nil {
		return m.FileUris
	}
	return nil
}

// GetArchiveUris returns the archive URIs, or nil if the receiver is nil.
func (m *HadoopJob) GetArchiveUris() []string {
	if m != nil {
		return m.ArchiveUris
	}
	return nil
}

// GetProperties returns the Hadoop property map, or nil if the receiver is nil.
func (m *HadoopJob) GetProperties() map[string]string {
	if m != nil {
		return m.Properties
	}
	return nil
}

// GetLoggingConfig returns the runtime log config, or nil if the receiver is nil.
func (m *HadoopJob) GetLoggingConfig() *LoggingConfig {
	if m != nil {
		return m.LoggingConfig
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _HadoopJob_OneofMarshaler, _HadoopJob_OneofUnmarshaler, _HadoopJob_OneofSizer, []interface{}{
		(*HadoopJob_MainJarFileUri)(nil),
		(*HadoopJob_MainClass)(nil),
	}
}

// _HadoopJob_OneofMarshaler encodes whichever "driver" case is set as field
// 1 or 2 with the bytes wire type; an unset oneof (nil) encodes nothing.
func _HadoopJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*HadoopJob)
	// driver
	switch x := m.Driver.(type) {
	case *HadoopJob_MainJarFileUri:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.MainJarFileUri)
	case *HadoopJob_MainClass:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.MainClass)
	case nil:
	default:
		return fmt.Errorf("HadoopJob.Driver has unexpected type %T", x)
	}
	return nil
}

// _HadoopJob_OneofUnmarshaler decodes fields 1 and 2 into the matching
// "driver" wrapper; it returns false for tags it does not own.
func _HadoopJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*HadoopJob)
	switch tag {
	case 1: // driver.main_jar_file_uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Driver = &HadoopJob_MainJarFileUri{x}
		return true, err
	case 2: // driver.main_class
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Driver = &HadoopJob_MainClass{x}
		return true, err
	default:
		return false, nil
	}
}

// _HadoopJob_OneofSizer reports the encoded size, in bytes, of whichever
// "driver" case is set (tag + length varint + payload).
func _HadoopJob_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*HadoopJob)
	// driver
	switch x := m.Driver.(type) {
	case *HadoopJob_MainJarFileUri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.MainJarFileUri)))
		n += len(x.MainJarFileUri)
	case *HadoopJob_MainClass:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.MainClass)))
		n += len(x.MainClass)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
// applications on YARN.
type SparkJob struct {
	// Required. The specification of the main method to call to drive the job.
	// Specify either the jar file that contains the main class or the main class
	// name. To pass both a main jar and a main class in that jar, add the jar to
	// `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
	//
	// Types that are valid to be assigned to Driver:
	//	*SparkJob_MainJarFileUri
	//	*SparkJob_MainClass
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Spark drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Spark drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Spark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

// Reset, String, ProtoMessage, and Descriptor implement the proto.Message
// interface for SparkJob.
func (m *SparkJob) Reset()         { *m = SparkJob{} }
func (m *SparkJob) String() string { return proto.CompactTextString(m) }
func (*SparkJob) ProtoMessage()    {}
func (*SparkJob) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{2}
}

// XXX_* methods delegate wire-format handling to the reflection-based
// message table xxx_messageInfo_SparkJob below.
func (m *SparkJob) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SparkJob.Unmarshal(m, b)
}
func (m *SparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SparkJob.Marshal(b, m, deterministic)
}
func (dst *SparkJob) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SparkJob.Merge(dst, src)
}
func (m *SparkJob) XXX_Size() int {
	return xxx_messageInfo_SparkJob.Size(m)
}
func (m *SparkJob) XXX_DiscardUnknown() {
	xxx_messageInfo_SparkJob.DiscardUnknown(m)
}

// xxx_messageInfo_SparkJob caches the lazily-built marshal/unmarshal tables
// shared by the XXX_* methods above.
var xxx_messageInfo_SparkJob proto.InternalMessageInfo

// isSparkJob_Driver is satisfied exactly by the wrapper types that can
// populate the "driver" oneof field.
type isSparkJob_Driver interface {
	isSparkJob_Driver()
}

// SparkJob_MainJarFileUri wraps the main_jar_file_uri case (field 1) of the
// "driver" oneof.
type SparkJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

// SparkJob_MainClass wraps the main_class case (field 2) of the "driver" oneof.
type SparkJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

func (*SparkJob_MainJarFileUri) isSparkJob_Driver() {}

func (*SparkJob_MainClass) isSparkJob_Driver() {}

// GetDriver returns the populated "driver" oneof wrapper, or nil if unset or
// if the receiver is nil.
func (m *SparkJob) GetDriver() isSparkJob_Driver {
	if m != nil {
		return m.Driver
	}
	return nil
}

// GetMainJarFileUri returns the main jar URI when that oneof case is set;
// otherwise it returns "".
func (m *SparkJob) GetMainJarFileUri() string {
	if x, ok := m.GetDriver().(*SparkJob_MainJarFileUri); ok {
		return x.MainJarFileUri
	}
	return ""
}

// GetMainClass returns the main class name when that oneof case is set;
// otherwise it returns "".
func (m *SparkJob) GetMainClass() string {
	if x, ok := m.GetDriver().(*SparkJob_MainClass); ok {
		return x.MainClass
	}
	return ""
}

// GetArgs returns the driver arguments, or nil if the receiver is nil.
func (m *SparkJob) GetArgs() []string {
	if m != nil {
		return m.Args
	}
	return nil
}

// GetJarFileUris returns the jar file URIs, or nil if the receiver is nil.
func (m *SparkJob) GetJarFileUris() []string {
	if m != nil {
		return m.JarFileUris
	}
	return nil
}

// GetFileUris returns the file URIs, or nil if the receiver is nil.
func (m *SparkJob) GetFileUris() []string {
	if m != nil {
		return m.FileUris
	}
	return nil
}

// GetArchiveUris returns the archive URIs, or nil if the receiver is nil.
func (m *SparkJob) GetArchiveUris() []string {
	if m != nil {
		return m.ArchiveUris
	}
	return nil
}

// GetProperties returns the Spark property map, or nil if the receiver is nil.
func (m *SparkJob) GetProperties() map[string]string {
	if m != nil {
		return m.Properties
	}
	return nil
}

// GetLoggingConfig returns the runtime log config, or nil if the receiver is nil.
func (m *SparkJob) GetLoggingConfig() *LoggingConfig {
	if m != nil {
		return m.LoggingConfig
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _SparkJob_OneofMarshaler, _SparkJob_OneofUnmarshaler, _SparkJob_OneofSizer, []interface{}{
		(*SparkJob_MainJarFileUri)(nil),
		(*SparkJob_MainClass)(nil),
	}
}

// _SparkJob_OneofMarshaler encodes whichever "driver" case is set as field
// 1 or 2 with the bytes wire type; an unset oneof (nil) encodes nothing.
func _SparkJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*SparkJob)
	// driver
	switch x := m.Driver.(type) {
	case *SparkJob_MainJarFileUri:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.MainJarFileUri)
	case *SparkJob_MainClass:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.MainClass)
	case nil:
	default:
		return fmt.Errorf("SparkJob.Driver has unexpected type %T", x)
	}
	return nil
}

// _SparkJob_OneofUnmarshaler decodes fields 1 and 2 into the matching
// "driver" wrapper; it returns false for tags it does not own.
func _SparkJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*SparkJob)
	switch tag {
	case 1: // driver.main_jar_file_uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Driver = &SparkJob_MainJarFileUri{x}
		return true, err
	case 2: // driver.main_class
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Driver = &SparkJob_MainClass{x}
		return true, err
	default:
		return false, nil
	}
}

// _SparkJob_OneofSizer reports the encoded size, in bytes, of whichever
// "driver" case is set (tag + length varint + payload).
func _SparkJob_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*SparkJob)
	// driver
	switch x := m.Driver.(type) {
	case *SparkJob_MainJarFileUri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.MainJarFileUri)))
		n += len(x.MainJarFileUri)
	case *SparkJob_MainClass:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.MainClass)))
		n += len(x.MainClass)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// A Cloud Dataproc job for running
// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
// applications on YARN.
type PySparkJob struct {
	// Required. The HCFS URI of the main Python file to use as the driver. Must
	// be a .py file.
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark
	// framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Python driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Python drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Python drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure PySpark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

// Reset, String, ProtoMessage, and Descriptor implement the proto.Message
// interface for PySparkJob.
func (m *PySparkJob) Reset()         { *m = PySparkJob{} }
func (m *PySparkJob) String() string { return proto.CompactTextString(m) }
func (*PySparkJob) ProtoMessage()    {}
func (*PySparkJob) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{3}
}

// XXX_* methods delegate wire-format handling to the reflection-based
// message table xxx_messageInfo_PySparkJob below.
func (m *PySparkJob) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PySparkJob.Unmarshal(m, b)
}
func (m *PySparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PySparkJob.Marshal(b, m, deterministic)
}
func (dst *PySparkJob) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PySparkJob.Merge(dst, src)
}
func (m *PySparkJob) XXX_Size() int {
	return xxx_messageInfo_PySparkJob.Size(m)
}
func (m *PySparkJob) XXX_DiscardUnknown() {
	xxx_messageInfo_PySparkJob.DiscardUnknown(m)
}

// xxx_messageInfo_PySparkJob caches the lazily-built marshal/unmarshal tables
// shared by the XXX_* methods above.
var xxx_messageInfo_PySparkJob proto.InternalMessageInfo

// GetMainPythonFileUri returns the main Python file URI, or "" if the
// receiver is nil (nil-safe, as for all generated getters).
func (m *PySparkJob) GetMainPythonFileUri() string {
	if m != nil {
		return m.MainPythonFileUri
	}
	return ""
}

// GetArgs returns the driver arguments, or nil if the receiver is nil.
func (m *PySparkJob) GetArgs() []string {
	if m != nil {
		return m.Args
	}
	return nil
}

// GetPythonFileUris returns the Python file URIs, or nil if the receiver is nil.
func (m *PySparkJob) GetPythonFileUris() []string {
	if m != nil {
		return m.PythonFileUris
	}
	return nil
}

// GetJarFileUris returns the jar file URIs, or nil if the receiver is nil.
func (m *PySparkJob) GetJarFileUris() []string {
	if m != nil {
		return m.JarFileUris
	}
	return nil
}

// GetFileUris returns the file URIs, or nil if the receiver is nil.
func (m *PySparkJob) GetFileUris() []string {
	if m != nil {
		return m.FileUris
	}
	return nil
}

// GetArchiveUris returns the archive URIs, or nil if the receiver is nil.
func (m *PySparkJob) GetArchiveUris() []string {
	if m != nil {
		return m.ArchiveUris
	}
	return nil
}

// GetProperties returns the PySpark property map, or nil if the receiver is nil.
func (m *PySparkJob) GetProperties() map[string]string {
	if m != nil {
		return m.Properties
	}
	return nil
}

// GetLoggingConfig returns the runtime log config, or nil if the receiver is nil.
func (m *PySparkJob) GetLoggingConfig() *LoggingConfig {
	if m != nil {
		return m.LoggingConfig
	}
	return nil
}
// A list of queries to run on a cluster.
type QueryList struct {
	// Required. The queries to execute. You do not need to terminate a query
	// with a semicolon. Multiple queries can be specified in one string
	// by separating each with a semicolon. Here is an example of a Cloud
	// Dataproc API snippet that uses a QueryList to specify a HiveJob:
	//
	//     "hiveJob": {
	//       "queryList": {
	//         "queries": [
	//           "query1",
	//           "query2",
	//           "query3;query4",
	//         ]
	//       }
	//     }
	Queries              []string `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset, String, ProtoMessage, and Descriptor implement the proto.Message
// interface for QueryList.
func (m *QueryList) Reset()         { *m = QueryList{} }
func (m *QueryList) String() string { return proto.CompactTextString(m) }
func (*QueryList) ProtoMessage()    {}
func (*QueryList) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{4}
}

// XXX_* methods delegate wire-format handling to the reflection-based
// message table xxx_messageInfo_QueryList below.
func (m *QueryList) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_QueryList.Unmarshal(m, b)
}
func (m *QueryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_QueryList.Marshal(b, m, deterministic)
}
func (dst *QueryList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryList.Merge(dst, src)
}
func (m *QueryList) XXX_Size() int {
	return xxx_messageInfo_QueryList.Size(m)
}
func (m *QueryList) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryList.DiscardUnknown(m)
}

// xxx_messageInfo_QueryList caches the lazily-built marshal/unmarshal tables
// shared by the XXX_* methods above.
var xxx_messageInfo_QueryList proto.InternalMessageInfo

// GetQueries returns the list of queries, or nil if the receiver is nil.
func (m *QueryList) GetQueries() []string {
	if m != nil {
		return m.Queries
	}
	return nil
}
| |
// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
// queries on YARN.
type HiveJob struct {
	// Required. The sequence of Hive queries to execute, specified as either
	// an HCFS file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*HiveJob_QueryFileUri
	//	*HiveJob_QueryList
	Queries isHiveJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when executing
	// independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Hive command: `SET name="value";`).
	ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. A mapping of property names and values, used to configure Hive.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the
	// Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
	// and UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *HiveJob) Reset() { *m = HiveJob{} }
// String renders the message in the proto compact text format.
func (m *HiveJob) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks HiveJob as a protobuf message.
func (*HiveJob) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*HiveJob) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{5}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *HiveJob) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_HiveJob.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *HiveJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_HiveJob.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *HiveJob) XXX_Merge(src proto.Message) {
	xxx_messageInfo_HiveJob.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *HiveJob) XXX_Size() int {
	return xxx_messageInfo_HiveJob.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *HiveJob) XXX_DiscardUnknown() {
	xxx_messageInfo_HiveJob.DiscardUnknown(m)
}

// xxx_messageInfo_HiveJob caches reflection data used by the XXX_* methods.
var xxx_messageInfo_HiveJob proto.InternalMessageInfo
| |
// isHiveJob_Queries is implemented by every wrapper type that can populate
// the HiveJob "queries" oneof.
type isHiveJob_Queries interface {
	isHiveJob_Queries()
}

// HiveJob_QueryFileUri wraps the query_file_uri oneof variant (field 1).
type HiveJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

// HiveJob_QueryList wraps the query_list oneof variant (field 2).
type HiveJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

// isHiveJob_Queries marks HiveJob_QueryFileUri as a valid oneof variant.
func (*HiveJob_QueryFileUri) isHiveJob_Queries() {}

// isHiveJob_Queries marks HiveJob_QueryList as a valid oneof variant.
func (*HiveJob_QueryList) isHiveJob_Queries() {}
| |
| func (m *HiveJob) GetQueries() isHiveJob_Queries { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetQueryFileUri() string { |
| if x, ok := m.GetQueries().(*HiveJob_QueryFileUri); ok { |
| return x.QueryFileUri |
| } |
| return "" |
| } |
| |
| func (m *HiveJob) GetQueryList() *QueryList { |
| if x, ok := m.GetQueries().(*HiveJob_QueryList); ok { |
| return x.QueryList |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetContinueOnFailure() bool { |
| if m != nil { |
| return m.ContinueOnFailure |
| } |
| return false |
| } |
| |
| func (m *HiveJob) GetScriptVariables() map[string]string { |
| if m != nil { |
| return m.ScriptVariables |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
// XXX_OneofFuncs is for the internal use of the proto package. It exposes the
// marshal/unmarshal/size helpers for the "queries" oneof together with the
// wrapper types that may populate it.
func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _HiveJob_OneofMarshaler, _HiveJob_OneofUnmarshaler, _HiveJob_OneofSizer, []interface{}{
		(*HiveJob_QueryFileUri)(nil),
		(*HiveJob_QueryList)(nil),
	}
}

// _HiveJob_OneofMarshaler encodes whichever "queries" oneof variant is set
// into b; an unset (nil) oneof encodes nothing.
func _HiveJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*HiveJob)
	// queries
	switch x := m.Queries.(type) {
	case *HiveJob_QueryFileUri:
		// Tag for field 1, wire type 2 (length-delimited).
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.QueryFileUri)
	case *HiveJob_QueryList:
		// Tag for field 2, wire type 2 (length-delimited).
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.QueryList); err != nil {
			return err
		}
	case nil:
		// Oneof not set: nothing to encode.
	default:
		return fmt.Errorf("HiveJob.Queries has unexpected type %T", x)
	}
	return nil
}

// _HiveJob_OneofUnmarshaler decodes one field of the "queries" oneof from b.
// It returns (true, err) when the tag belongs to this oneof, and (false, nil)
// when the tag is not handled here.
func _HiveJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*HiveJob)
	switch tag {
	case 1: // queries.query_file_uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Queries = &HiveJob_QueryFileUri{x}
		return true, err
	case 2: // queries.query_list
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(QueryList)
		err := b.DecodeMessage(msg)
		m.Queries = &HiveJob_QueryList{msg}
		return true, err
	default:
		return false, nil
	}
}

// _HiveJob_OneofSizer returns the encoded size in bytes of whichever
// "queries" oneof variant is set; an unset oneof contributes zero.
func _HiveJob_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*HiveJob)
	// queries
	switch x := m.Queries.(type) {
	case *HiveJob_QueryFileUri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.QueryFileUri)))
		n += len(x.QueryFileUri)
	case *HiveJob_QueryList:
		s := proto.Size(x.QueryList)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
// queries.
type SparkSqlJob struct {
	// Required. The sequence of Spark SQL queries to execute, specified as
	// either an HCFS file URI or as a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*SparkSqlJob_QueryFileUri
	//	*SparkSqlJob_QueryList
	Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. A mapping of property names to values, used to configure
	// Spark SQL's SparkConf. Properties that conflict with values set by the
	// Cloud Dataproc API may be overwritten.
	Properties map[string]string `protobuf:"bytes,4,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	// (Field number 56 comes from the service's .proto definition.)
	JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *SparkSqlJob) Reset() { *m = SparkSqlJob{} }
// String renders the message in the proto compact text format.
func (m *SparkSqlJob) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks SparkSqlJob as a protobuf message.
func (*SparkSqlJob) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*SparkSqlJob) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{6}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *SparkSqlJob) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SparkSqlJob.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *SparkSqlJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SparkSqlJob.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *SparkSqlJob) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SparkSqlJob.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *SparkSqlJob) XXX_Size() int {
	return xxx_messageInfo_SparkSqlJob.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *SparkSqlJob) XXX_DiscardUnknown() {
	xxx_messageInfo_SparkSqlJob.DiscardUnknown(m)
}

// xxx_messageInfo_SparkSqlJob caches reflection data used by the XXX_* methods.
var xxx_messageInfo_SparkSqlJob proto.InternalMessageInfo
| |
// isSparkSqlJob_Queries is implemented by every wrapper type that can
// populate the SparkSqlJob "queries" oneof.
type isSparkSqlJob_Queries interface {
	isSparkSqlJob_Queries()
}

// SparkSqlJob_QueryFileUri wraps the query_file_uri oneof variant (field 1).
type SparkSqlJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

// SparkSqlJob_QueryList wraps the query_list oneof variant (field 2).
type SparkSqlJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

// isSparkSqlJob_Queries marks SparkSqlJob_QueryFileUri as a valid oneof variant.
func (*SparkSqlJob_QueryFileUri) isSparkSqlJob_Queries() {}

// isSparkSqlJob_Queries marks SparkSqlJob_QueryList as a valid oneof variant.
func (*SparkSqlJob_QueryList) isSparkSqlJob_Queries() {}
| |
| func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetQueryFileUri() string { |
| if x, ok := m.GetQueries().(*SparkSqlJob_QueryFileUri); ok { |
| return x.QueryFileUri |
| } |
| return "" |
| } |
| |
| func (m *SparkSqlJob) GetQueryList() *QueryList { |
| if x, ok := m.GetQueries().(*SparkSqlJob_QueryList); ok { |
| return x.QueryList |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetScriptVariables() map[string]string { |
| if m != nil { |
| return m.ScriptVariables |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
// XXX_OneofFuncs is for the internal use of the proto package. It exposes the
// marshal/unmarshal/size helpers for the "queries" oneof together with the
// wrapper types that may populate it.
func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _SparkSqlJob_OneofMarshaler, _SparkSqlJob_OneofUnmarshaler, _SparkSqlJob_OneofSizer, []interface{}{
		(*SparkSqlJob_QueryFileUri)(nil),
		(*SparkSqlJob_QueryList)(nil),
	}
}

// _SparkSqlJob_OneofMarshaler encodes whichever "queries" oneof variant is
// set into b; an unset (nil) oneof encodes nothing.
func _SparkSqlJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*SparkSqlJob)
	// queries
	switch x := m.Queries.(type) {
	case *SparkSqlJob_QueryFileUri:
		// Tag for field 1, wire type 2 (length-delimited).
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.QueryFileUri)
	case *SparkSqlJob_QueryList:
		// Tag for field 2, wire type 2 (length-delimited).
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.QueryList); err != nil {
			return err
		}
	case nil:
		// Oneof not set: nothing to encode.
	default:
		return fmt.Errorf("SparkSqlJob.Queries has unexpected type %T", x)
	}
	return nil
}

// _SparkSqlJob_OneofUnmarshaler decodes one field of the "queries" oneof from
// b. It returns (true, err) when the tag belongs to this oneof, and
// (false, nil) when the tag is not handled here.
func _SparkSqlJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*SparkSqlJob)
	switch tag {
	case 1: // queries.query_file_uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Queries = &SparkSqlJob_QueryFileUri{x}
		return true, err
	case 2: // queries.query_list
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(QueryList)
		err := b.DecodeMessage(msg)
		m.Queries = &SparkSqlJob_QueryList{msg}
		return true, err
	default:
		return false, nil
	}
}

// _SparkSqlJob_OneofSizer returns the encoded size in bytes of whichever
// "queries" oneof variant is set; an unset oneof contributes zero.
func _SparkSqlJob_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*SparkSqlJob)
	// queries
	switch x := m.Queries.(type) {
	case *SparkSqlJob_QueryFileUri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.QueryFileUri)))
		n += len(x.QueryFileUri)
	case *SparkSqlJob_QueryList:
		s := proto.Size(x.QueryList)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
// queries on YARN.
type PigJob struct {
	// Required. The sequence of Pig queries to execute, specified as an HCFS
	// file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*PigJob_QueryFileUri
	//	*PigJob_QueryList
	Queries isPigJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when executing
	// independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig
	// command: `name=[value]`).
	ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. A mapping of property names to values, used to configure Pig.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of
	// the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *PigJob) Reset() { *m = PigJob{} }
// String renders the message in the proto compact text format.
func (m *PigJob) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks PigJob as a protobuf message.
func (*PigJob) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*PigJob) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{7}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *PigJob) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PigJob.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *PigJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PigJob.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *PigJob) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PigJob.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *PigJob) XXX_Size() int {
	return xxx_messageInfo_PigJob.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *PigJob) XXX_DiscardUnknown() {
	xxx_messageInfo_PigJob.DiscardUnknown(m)
}

// xxx_messageInfo_PigJob caches reflection data used by the XXX_* methods.
var xxx_messageInfo_PigJob proto.InternalMessageInfo
| |
// isPigJob_Queries is implemented by every wrapper type that can populate the
// PigJob "queries" oneof.
type isPigJob_Queries interface {
	isPigJob_Queries()
}

// PigJob_QueryFileUri wraps the query_file_uri oneof variant (field 1).
type PigJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

// PigJob_QueryList wraps the query_list oneof variant (field 2).
type PigJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

// isPigJob_Queries marks PigJob_QueryFileUri as a valid oneof variant.
func (*PigJob_QueryFileUri) isPigJob_Queries() {}

// isPigJob_Queries marks PigJob_QueryList as a valid oneof variant.
func (*PigJob_QueryList) isPigJob_Queries() {}
| |
| func (m *PigJob) GetQueries() isPigJob_Queries { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetQueryFileUri() string { |
| if x, ok := m.GetQueries().(*PigJob_QueryFileUri); ok { |
| return x.QueryFileUri |
| } |
| return "" |
| } |
| |
| func (m *PigJob) GetQueryList() *QueryList { |
| if x, ok := m.GetQueries().(*PigJob_QueryList); ok { |
| return x.QueryList |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetContinueOnFailure() bool { |
| if m != nil { |
| return m.ContinueOnFailure |
| } |
| return false |
| } |
| |
| func (m *PigJob) GetScriptVariables() map[string]string { |
| if m != nil { |
| return m.ScriptVariables |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
// XXX_OneofFuncs is for the internal use of the proto package. It exposes the
// marshal/unmarshal/size helpers for the "queries" oneof together with the
// wrapper types that may populate it.
func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _PigJob_OneofMarshaler, _PigJob_OneofUnmarshaler, _PigJob_OneofSizer, []interface{}{
		(*PigJob_QueryFileUri)(nil),
		(*PigJob_QueryList)(nil),
	}
}

// _PigJob_OneofMarshaler encodes whichever "queries" oneof variant is set
// into b; an unset (nil) oneof encodes nothing.
func _PigJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*PigJob)
	// queries
	switch x := m.Queries.(type) {
	case *PigJob_QueryFileUri:
		// Tag for field 1, wire type 2 (length-delimited).
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.QueryFileUri)
	case *PigJob_QueryList:
		// Tag for field 2, wire type 2 (length-delimited).
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.QueryList); err != nil {
			return err
		}
	case nil:
		// Oneof not set: nothing to encode.
	default:
		return fmt.Errorf("PigJob.Queries has unexpected type %T", x)
	}
	return nil
}

// _PigJob_OneofUnmarshaler decodes one field of the "queries" oneof from b.
// It returns (true, err) when the tag belongs to this oneof, and (false, nil)
// when the tag is not handled here.
func _PigJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*PigJob)
	switch tag {
	case 1: // queries.query_file_uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Queries = &PigJob_QueryFileUri{x}
		return true, err
	case 2: // queries.query_list
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(QueryList)
		err := b.DecodeMessage(msg)
		m.Queries = &PigJob_QueryList{msg}
		return true, err
	default:
		return false, nil
	}
}

// _PigJob_OneofSizer returns the encoded size in bytes of whichever "queries"
// oneof variant is set; an unset oneof contributes zero.
func _PigJob_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*PigJob)
	// queries
	switch x := m.Queries.(type) {
	case *PigJob_QueryFileUri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.QueryFileUri)))
		n += len(x.QueryFileUri)
	case *PigJob_QueryList:
		s := proto.Size(x.QueryList)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// Cloud Dataproc job config.
type JobPlacement struct {
	// Required. The name of the cluster where the job will be submitted.
	ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Output only. A cluster UUID generated by the Cloud Dataproc service when
	// the job is submitted.
	ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *JobPlacement) Reset() { *m = JobPlacement{} }
// String renders the message in the proto compact text format.
func (m *JobPlacement) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks JobPlacement as a protobuf message.
func (*JobPlacement) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*JobPlacement) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{8}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *JobPlacement) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_JobPlacement.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *JobPlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_JobPlacement.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *JobPlacement) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JobPlacement.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *JobPlacement) XXX_Size() int {
	return xxx_messageInfo_JobPlacement.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *JobPlacement) XXX_DiscardUnknown() {
	xxx_messageInfo_JobPlacement.DiscardUnknown(m)
}

// xxx_messageInfo_JobPlacement caches reflection data used by the XXX_* methods.
var xxx_messageInfo_JobPlacement proto.InternalMessageInfo
| |
| func (m *JobPlacement) GetClusterName() string { |
| if m != nil { |
| return m.ClusterName |
| } |
| return "" |
| } |
| |
| func (m *JobPlacement) GetClusterUuid() string { |
| if m != nil { |
| return m.ClusterUuid |
| } |
| return "" |
| } |
| |
// Cloud Dataproc job status.
type JobStatus struct {
	// Output only. A state message specifying the overall job state.
	State JobStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_State" json:"state,omitempty"`
	// Output only. Optional job state details, such as an error
	// description if the state is <code>ERROR</code>.
	Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time when this state was entered.
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information, which includes
	// status reported by the agent.
	Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_Substate" json:"substate,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *JobStatus) Reset() { *m = JobStatus{} }
// String renders the message in the proto compact text format.
func (m *JobStatus) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks JobStatus as a protobuf message.
func (*JobStatus) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*JobStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{9}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *JobStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_JobStatus.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_JobStatus.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *JobStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JobStatus.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *JobStatus) XXX_Size() int {
	return xxx_messageInfo_JobStatus.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *JobStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_JobStatus.DiscardUnknown(m)
}

// xxx_messageInfo_JobStatus caches reflection data used by the XXX_* methods.
var xxx_messageInfo_JobStatus proto.InternalMessageInfo
| |
| func (m *JobStatus) GetState() JobStatus_State { |
| if m != nil { |
| return m.State |
| } |
| return JobStatus_STATE_UNSPECIFIED |
| } |
| |
| func (m *JobStatus) GetDetails() string { |
| if m != nil { |
| return m.Details |
| } |
| return "" |
| } |
| |
| func (m *JobStatus) GetStateStartTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.StateStartTime |
| } |
| return nil |
| } |
| |
| func (m *JobStatus) GetSubstate() JobStatus_Substate { |
| if m != nil { |
| return m.Substate |
| } |
| return JobStatus_UNSPECIFIED |
| } |
| |
// Encapsulates the full scoping used to reference a job.
type JobReference struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Optional. The job ID, which must be unique within the project. The job ID
	// is generated by the server upon job submission or provided by the user as a
	// means to perform retries without creating duplicate jobs. The ID must
	// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
	// hyphens (-). The maximum length is 100 characters.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *JobReference) Reset() { *m = JobReference{} }
// String renders the message in the proto compact text format.
func (m *JobReference) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks JobReference as a protobuf message.
func (*JobReference) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*JobReference) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{10}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *JobReference) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_JobReference.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *JobReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_JobReference.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *JobReference) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JobReference.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *JobReference) XXX_Size() int {
	return xxx_messageInfo_JobReference.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *JobReference) XXX_DiscardUnknown() {
	xxx_messageInfo_JobReference.DiscardUnknown(m)
}

// xxx_messageInfo_JobReference caches reflection data used by the XXX_* methods.
var xxx_messageInfo_JobReference proto.InternalMessageInfo
| |
| func (m *JobReference) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *JobReference) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
// A YARN application created by a job. Application information is a subset of
// <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
//
// **Beta Feature**: This report is available for testing purposes only. It may
// be changed before final release.
type YarnApplication struct {
	// Required. The application name.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The application state.
	State YarnApplication_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.YarnApplication_State" json:"state,omitempty"`
	// Required. The numerical progress of the application, from 1 to 100.
	Progress float32 `protobuf:"fixed32,3,opt,name=progress,proto3" json:"progress,omitempty"`
	// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
	// TimelineServer that provides application-specific information. The URL uses
	// the internal hostname, and requires a proxy server for resolution and,
	// possibly, access.
	TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl,proto3" json:"tracking_url,omitempty"`
	// Internal protobuf bookkeeping; not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Reset restores the message to its zero value.
func (m *YarnApplication) Reset() { *m = YarnApplication{} }
// String renders the message in the proto compact text format.
func (m *YarnApplication) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks YarnApplication as a protobuf message.
func (*YarnApplication) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and this message's index
// path within that descriptor.
func (*YarnApplication) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{11}
}
// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *YarnApplication) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_YarnApplication.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *YarnApplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_YarnApplication.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *YarnApplication) XXX_Merge(src proto.Message) {
	xxx_messageInfo_YarnApplication.Merge(dst, src)
}
// XXX_Size returns the encoded size of the message in bytes.
func (m *YarnApplication) XXX_Size() int {
	return xxx_messageInfo_YarnApplication.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *YarnApplication) XXX_DiscardUnknown() {
	xxx_messageInfo_YarnApplication.DiscardUnknown(m)
}

// xxx_messageInfo_YarnApplication caches reflection data used by the XXX_* methods.
var xxx_messageInfo_YarnApplication proto.InternalMessageInfo
| |
| func (m *YarnApplication) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| func (m *YarnApplication) GetState() YarnApplication_State { |
| if m != nil { |
| return m.State |
| } |
| return YarnApplication_STATE_UNSPECIFIED |
| } |
| |
| func (m *YarnApplication) GetProgress() float32 { |
| if m != nil { |
| return m.Progress |
| } |
| return 0 |
| } |
| |
| func (m *YarnApplication) GetTrackingUrl() string { |
| if m != nil { |
| return m.TrackingUrl |
| } |
| return "" |
| } |
| |
| // A Cloud Dataproc job resource. |
| type Job struct { |
| // Optional. The fully qualified reference to the job, which can be used to |
| // obtain the equivalent REST path of the job resource. If this property |
| // is not specified when a job is created, the server generates a |
| // <code>job_id</code>. |
| Reference *JobReference `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` |
| // Required. Job information, including how, when, and where to |
| // run the job. |
| Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement,proto3" json:"placement,omitempty"` |
| // Required. The application/framework-specific portion of the job. |
| // |
| // Types that are valid to be assigned to TypeJob: |
| // *Job_HadoopJob |
| // *Job_SparkJob |
| // *Job_PysparkJob |
| // *Job_HiveJob |
| // *Job_PigJob |
| // *Job_SparkSqlJob |
| TypeJob isJob_TypeJob `protobuf_oneof:"type_job"` |
| // Output only. The job status. Additional application-specific |
| // status information may be contained in the <code>type_job</code> |
| // and <code>yarn_applications</code> fields. |
| Status *JobStatus `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"` |
| // Output only. The previous job status. |
| StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"` |
| // Output only. The collection of YARN applications spun up by this job. |
| // |
| // **Beta** Feature: This report is available for testing purposes only. It may |
| // be changed before final release. |
| YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications,proto3" json:"yarn_applications,omitempty"` |
| // Output only. A URI pointing to the location of the stdout of the job's |
| // driver program. |
| DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri,proto3" json:"driver_output_resource_uri,omitempty"` |
| // Output only. If present, the location of miscellaneous control files |
| // which may be used as part of job setup and handling. If not present, |
| // control files may be placed in the same location as `driver_output_uri`. |
| DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri,proto3" json:"driver_control_files_uri,omitempty"` |
| // Optional. The labels to associate with this job. |
| // Label **keys** must contain 1 to 63 characters, and must conform to |
| // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). |
| // Label **values** may be empty, but, if present, must contain 1 to 63 |
| // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). |
| // No more than 32 labels can be associated with a job. |
| Labels map[string]string `protobuf:"bytes,18,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. Job scheduling configuration. |
| Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling,proto3" json:"scheduling,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
// Reset restores m to the empty Job.
func (m *Job) Reset() { *m = Job{} }
// String renders m using the proto compact text format.
func (m *Job) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *Job as a protobuf message type.
func (*Job) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes for jobs.proto and
// the index path of this message within it.
func (*Job) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{12}
}
// XXX_Unmarshal decodes wire-format bytes b into m. Internal use by the
// proto runtime only.
func (m *Job) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Job.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b. Internal use only.
func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Job.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into dst. Internal use only.
func (dst *Job) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Job.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes. Internal use only.
func (m *Job) XXX_Size() int {
	return xxx_messageInfo_Job.Size(m)
}
// XXX_DiscardUnknown drops unrecognized fields retained on m. Internal use only.
func (m *Job) XXX_DiscardUnknown() {
	xxx_messageInfo_Job.DiscardUnknown(m)
}

// xxx_messageInfo_Job caches reflection-derived marshal/unmarshal state for Job.
var xxx_messageInfo_Job proto.InternalMessageInfo
| |
| func (m *Job) GetReference() *JobReference { |
| if m != nil { |
| return m.Reference |
| } |
| return nil |
| } |
| |
| func (m *Job) GetPlacement() *JobPlacement { |
| if m != nil { |
| return m.Placement |
| } |
| return nil |
| } |
| |
// isJob_TypeJob is implemented by every wrapper type that can be stored in
// the Job.TypeJob oneof ("type_job").
type isJob_TypeJob interface {
	isJob_TypeJob()
}

// Job_HadoopJob holds the type_job oneof as a Hadoop job (field 3).
type Job_HadoopJob struct {
	HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

// Job_SparkJob holds the type_job oneof as a Spark job (field 4).
type Job_SparkJob struct {
	SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

// Job_PysparkJob holds the type_job oneof as a PySpark job (field 5).
type Job_PysparkJob struct {
	PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

// Job_HiveJob holds the type_job oneof as a Hive job (field 6).
type Job_HiveJob struct {
	HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

// Job_PigJob holds the type_job oneof as a Pig job (field 7).
type Job_PigJob struct {
	PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

// Job_SparkSqlJob holds the type_job oneof as a Spark SQL job (field 12).
type Job_SparkSqlJob struct {
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

// The marker methods below make each wrapper satisfy isJob_TypeJob.
func (*Job_HadoopJob) isJob_TypeJob() {}

func (*Job_SparkJob) isJob_TypeJob() {}

func (*Job_PysparkJob) isJob_TypeJob() {}

func (*Job_HiveJob) isJob_TypeJob() {}

func (*Job_PigJob) isJob_TypeJob() {}

func (*Job_SparkSqlJob) isJob_TypeJob() {}

// GetTypeJob returns the currently set type_job oneof wrapper, or nil when
// the receiver is nil or no variant is set.
func (m *Job) GetTypeJob() isJob_TypeJob {
	if m != nil {
		return m.TypeJob
	}
	return nil
}
| |
| func (m *Job) GetHadoopJob() *HadoopJob { |
| if x, ok := m.GetTypeJob().(*Job_HadoopJob); ok { |
| return x.HadoopJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetSparkJob() *SparkJob { |
| if x, ok := m.GetTypeJob().(*Job_SparkJob); ok { |
| return x.SparkJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetPysparkJob() *PySparkJob { |
| if x, ok := m.GetTypeJob().(*Job_PysparkJob); ok { |
| return x.PysparkJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetHiveJob() *HiveJob { |
| if x, ok := m.GetTypeJob().(*Job_HiveJob); ok { |
| return x.HiveJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetPigJob() *PigJob { |
| if x, ok := m.GetTypeJob().(*Job_PigJob); ok { |
| return x.PigJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetSparkSqlJob() *SparkSqlJob { |
| if x, ok := m.GetTypeJob().(*Job_SparkSqlJob); ok { |
| return x.SparkSqlJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetStatus() *JobStatus { |
| if m != nil { |
| return m.Status |
| } |
| return nil |
| } |
| |
| func (m *Job) GetStatusHistory() []*JobStatus { |
| if m != nil { |
| return m.StatusHistory |
| } |
| return nil |
| } |
| |
| func (m *Job) GetYarnApplications() []*YarnApplication { |
| if m != nil { |
| return m.YarnApplications |
| } |
| return nil |
| } |
| |
| func (m *Job) GetDriverOutputResourceUri() string { |
| if m != nil { |
| return m.DriverOutputResourceUri |
| } |
| return "" |
| } |
| |
| func (m *Job) GetDriverControlFilesUri() string { |
| if m != nil { |
| return m.DriverControlFilesUri |
| } |
| return "" |
| } |
| |
| func (m *Job) GetLabels() map[string]string { |
| if m != nil { |
| return m.Labels |
| } |
| return nil |
| } |
| |
| func (m *Job) GetScheduling() *JobScheduling { |
| if m != nil { |
| return m.Scheduling |
| } |
| return nil |
| } |
| |
// XXX_OneofFuncs is for the internal use of the proto package.
// It exposes the marshal, unmarshal and size hooks for the type_job oneof,
// together with the list of wrapper types the oneof can hold.
func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{
		(*Job_HadoopJob)(nil),
		(*Job_SparkJob)(nil),
		(*Job_PysparkJob)(nil),
		(*Job_HiveJob)(nil),
		(*Job_PigJob)(nil),
		(*Job_SparkSqlJob)(nil),
	}
}

// _Job_OneofMarshaler encodes whichever type_job variant is set on msg into
// b: a varint tag (field number<<3 | wire type) followed by the
// length-delimited message bytes. A nil oneof writes nothing.
func _Job_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*Job)
	// type_job
	switch x := m.TypeJob.(type) {
	case *Job_HadoopJob:
		b.EncodeVarint(3<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.HadoopJob); err != nil {
			return err
		}
	case *Job_SparkJob:
		b.EncodeVarint(4<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.SparkJob); err != nil {
			return err
		}
	case *Job_PysparkJob:
		b.EncodeVarint(5<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.PysparkJob); err != nil {
			return err
		}
	case *Job_HiveJob:
		b.EncodeVarint(6<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.HiveJob); err != nil {
			return err
		}
	case *Job_PigJob:
		b.EncodeVarint(7<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.PigJob); err != nil {
			return err
		}
	case *Job_SparkSqlJob:
		b.EncodeVarint(12<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.SparkSqlJob); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("Job.TypeJob has unexpected type %T", x)
	}
	return nil
}

// _Job_OneofUnmarshaler decodes a single type_job field identified by tag
// from b into m.TypeJob. It reports false for tags that do not belong to
// this oneof so the caller can process them elsewhere.
func _Job_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*Job)
	switch tag {
	case 3: // type_job.hadoop_job
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(HadoopJob)
		err := b.DecodeMessage(msg)
		m.TypeJob = &Job_HadoopJob{msg}
		return true, err
	case 4: // type_job.spark_job
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(SparkJob)
		err := b.DecodeMessage(msg)
		m.TypeJob = &Job_SparkJob{msg}
		return true, err
	case 5: // type_job.pyspark_job
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(PySparkJob)
		err := b.DecodeMessage(msg)
		m.TypeJob = &Job_PysparkJob{msg}
		return true, err
	case 6: // type_job.hive_job
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(HiveJob)
		err := b.DecodeMessage(msg)
		m.TypeJob = &Job_HiveJob{msg}
		return true, err
	case 7: // type_job.pig_job
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(PigJob)
		err := b.DecodeMessage(msg)
		m.TypeJob = &Job_PigJob{msg}
		return true, err
	case 12: // type_job.spark_sql_job
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(SparkSqlJob)
		err := b.DecodeMessage(msg)
		m.TypeJob = &Job_SparkSqlJob{msg}
		return true, err
	default:
		return false, nil
	}
}

// _Job_OneofSizer reports the encoded size in bytes of the type_job variant
// stored in msg. Each case adds one byte for the tag/wire varint (every
// field number here is below 16, so the tag fits in a single byte), the
// varint length prefix, and the message payload itself.
func _Job_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*Job)
	// type_job
	switch x := m.TypeJob.(type) {
	case *Job_HadoopJob:
		s := proto.Size(x.HadoopJob)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *Job_SparkJob:
		s := proto.Size(x.SparkJob)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *Job_PysparkJob:
		s := proto.Size(x.PysparkJob)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *Job_HiveJob:
		s := proto.Size(x.HiveJob)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *Job_PigJob:
		s := proto.Size(x.PigJob)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *Job_SparkSqlJob:
		s := proto.Size(x.SparkSqlJob)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// Job scheduling options.
type JobScheduling struct {
	// Optional. Maximum number of times per hour a driver may be restarted as
	// a result of driver terminating with non-zero code before job is
	// reported failed.
	//
	// A job may be reported as thrashing if driver exits with non-zero code
	// 4 times within 10 minute window.
	//
	// Maximum value is 10.
	MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour,proto3" json:"max_failures_per_hour,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty JobScheduling.
func (m *JobScheduling) Reset() { *m = JobScheduling{} }
// String renders m using the proto compact text format.
func (m *JobScheduling) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *JobScheduling as a protobuf message type.
func (*JobScheduling) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*JobScheduling) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{13}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *JobScheduling) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_JobScheduling.Unmarshal(m, b)
}
func (m *JobScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_JobScheduling.Marshal(b, m, deterministic)
}
func (dst *JobScheduling) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JobScheduling.Merge(dst, src)
}
func (m *JobScheduling) XXX_Size() int {
	return xxx_messageInfo_JobScheduling.Size(m)
}
func (m *JobScheduling) XXX_DiscardUnknown() {
	xxx_messageInfo_JobScheduling.DiscardUnknown(m)
}

// xxx_messageInfo_JobScheduling caches marshal/unmarshal state for JobScheduling.
var xxx_messageInfo_JobScheduling proto.InternalMessageInfo
| |
| func (m *JobScheduling) GetMaxFailuresPerHour() int32 { |
| if m != nil { |
| return m.MaxFailuresPerHour |
| } |
| return 0 |
| } |
| |
// A request to submit a job.
type SubmitJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job resource.
	Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty SubmitJobRequest.
func (m *SubmitJobRequest) Reset() { *m = SubmitJobRequest{} }
// String renders m using the proto compact text format.
func (m *SubmitJobRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *SubmitJobRequest as a protobuf message type.
func (*SubmitJobRequest) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*SubmitJobRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{14}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *SubmitJobRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SubmitJobRequest.Unmarshal(m, b)
}
func (m *SubmitJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SubmitJobRequest.Marshal(b, m, deterministic)
}
func (dst *SubmitJobRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SubmitJobRequest.Merge(dst, src)
}
func (m *SubmitJobRequest) XXX_Size() int {
	return xxx_messageInfo_SubmitJobRequest.Size(m)
}
func (m *SubmitJobRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SubmitJobRequest.DiscardUnknown(m)
}

// xxx_messageInfo_SubmitJobRequest caches marshal/unmarshal state for SubmitJobRequest.
var xxx_messageInfo_SubmitJobRequest proto.InternalMessageInfo
| |
| func (m *SubmitJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *SubmitJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *SubmitJobRequest) GetJob() *Job { |
| if m != nil { |
| return m.Job |
| } |
| return nil |
| } |
| |
| func (m *SubmitJobRequest) GetRequestId() string { |
| if m != nil { |
| return m.RequestId |
| } |
| return "" |
| } |
| |
// A request to get the resource representation for a job in a project.
type GetJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty GetJobRequest.
func (m *GetJobRequest) Reset() { *m = GetJobRequest{} }
// String renders m using the proto compact text format.
func (m *GetJobRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *GetJobRequest as a protobuf message type.
func (*GetJobRequest) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*GetJobRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{15}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *GetJobRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetJobRequest.Unmarshal(m, b)
}
func (m *GetJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetJobRequest.Marshal(b, m, deterministic)
}
func (dst *GetJobRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetJobRequest.Merge(dst, src)
}
func (m *GetJobRequest) XXX_Size() int {
	return xxx_messageInfo_GetJobRequest.Size(m)
}
func (m *GetJobRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetJobRequest.DiscardUnknown(m)
}

// xxx_messageInfo_GetJobRequest caches marshal/unmarshal state for GetJobRequest.
var xxx_messageInfo_GetJobRequest proto.InternalMessageInfo
| |
| func (m *GetJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *GetJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *GetJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
// A request to list jobs in a project.
type ListJobsRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. The number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// Optional. If set, the returned jobs list includes only jobs that were
	// submitted to the named cluster.
	ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifies enumerated categories of jobs to list.
	// (default = match ALL jobs).
	//
	// If `filter` is provided, `jobStateMatcher` will be ignored.
	JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,proto3,enum=google.cloud.dataproc.v1beta2.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"`
	// Optional. A filter constraining the jobs to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// [field = value] AND [field [= value]] ...
	//
	// where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
	// key. **value** can be `*` to match all values.
	// `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty ListJobsRequest.
func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} }
// String renders m using the proto compact text format.
func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *ListJobsRequest as a protobuf message type.
func (*ListJobsRequest) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*ListJobsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{16}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *ListJobsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListJobsRequest.Unmarshal(m, b)
}
func (m *ListJobsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListJobsRequest.Marshal(b, m, deterministic)
}
func (dst *ListJobsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListJobsRequest.Merge(dst, src)
}
func (m *ListJobsRequest) XXX_Size() int {
	return xxx_messageInfo_ListJobsRequest.Size(m)
}
func (m *ListJobsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListJobsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ListJobsRequest caches marshal/unmarshal state for ListJobsRequest.
var xxx_messageInfo_ListJobsRequest proto.InternalMessageInfo
| |
| func (m *ListJobsRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetPageSize() int32 { |
| if m != nil { |
| return m.PageSize |
| } |
| return 0 |
| } |
| |
| func (m *ListJobsRequest) GetPageToken() string { |
| if m != nil { |
| return m.PageToken |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetClusterName() string { |
| if m != nil { |
| return m.ClusterName |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher { |
| if m != nil { |
| return m.JobStateMatcher |
| } |
| return ListJobsRequest_ALL |
| } |
| |
| func (m *ListJobsRequest) GetFilter() string { |
| if m != nil { |
| return m.Filter |
| } |
| return "" |
| } |
| |
// A request to update a job.
type UpdateJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Required. The changes to the job.
	Job *Job `protobuf:"bytes,4,opt,name=job,proto3" json:"job,omitempty"`
	// Required. Specifies the path, relative to <code>Job</code>, of
	// the field to update. For example, to update the labels of a Job the
	// <code>update_mask</code> parameter would be specified as
	// <code>labels</code>, and the `PATCH` request body would specify the new
	// value. <strong>Note:</strong> Currently, <code>labels</code> is the only
	// field that can be updated.
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty UpdateJobRequest.
func (m *UpdateJobRequest) Reset() { *m = UpdateJobRequest{} }
// String renders m using the proto compact text format.
func (m *UpdateJobRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *UpdateJobRequest as a protobuf message type.
func (*UpdateJobRequest) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*UpdateJobRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{17}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *UpdateJobRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpdateJobRequest.Unmarshal(m, b)
}
func (m *UpdateJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpdateJobRequest.Marshal(b, m, deterministic)
}
func (dst *UpdateJobRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateJobRequest.Merge(dst, src)
}
func (m *UpdateJobRequest) XXX_Size() int {
	return xxx_messageInfo_UpdateJobRequest.Size(m)
}
func (m *UpdateJobRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateJobRequest.DiscardUnknown(m)
}

// xxx_messageInfo_UpdateJobRequest caches marshal/unmarshal state for UpdateJobRequest.
var xxx_messageInfo_UpdateJobRequest proto.InternalMessageInfo
| |
| func (m *UpdateJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *UpdateJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *UpdateJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
| func (m *UpdateJobRequest) GetJob() *Job { |
| if m != nil { |
| return m.Job |
| } |
| return nil |
| } |
| |
| func (m *UpdateJobRequest) GetUpdateMask() *field_mask.FieldMask { |
| if m != nil { |
| return m.UpdateMask |
| } |
| return nil |
| } |
| |
// A list of jobs in a project.
type ListJobsResponse struct {
	// Output only. Jobs list.
	Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
	// Optional. This token is included in the response if there are more results
	// to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent <code>ListJobsRequest</code>.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty ListJobsResponse.
func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} }
// String renders m using the proto compact text format.
func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *ListJobsResponse as a protobuf message type.
func (*ListJobsResponse) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*ListJobsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{18}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *ListJobsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListJobsResponse.Unmarshal(m, b)
}
func (m *ListJobsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListJobsResponse.Marshal(b, m, deterministic)
}
func (dst *ListJobsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListJobsResponse.Merge(dst, src)
}
func (m *ListJobsResponse) XXX_Size() int {
	return xxx_messageInfo_ListJobsResponse.Size(m)
}
func (m *ListJobsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ListJobsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_ListJobsResponse caches marshal/unmarshal state for ListJobsResponse.
var xxx_messageInfo_ListJobsResponse proto.InternalMessageInfo
| |
| func (m *ListJobsResponse) GetJobs() []*Job { |
| if m != nil { |
| return m.Jobs |
| } |
| return nil |
| } |
| |
| func (m *ListJobsResponse) GetNextPageToken() string { |
| if m != nil { |
| return m.NextPageToken |
| } |
| return "" |
| } |
| |
// A request to cancel a job.
type CancelJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty CancelJobRequest.
func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} }
// String renders m using the proto compact text format.
func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *CancelJobRequest as a protobuf message type.
func (*CancelJobRequest) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*CancelJobRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{19}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *CancelJobRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CancelJobRequest.Unmarshal(m, b)
}
func (m *CancelJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CancelJobRequest.Marshal(b, m, deterministic)
}
func (dst *CancelJobRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CancelJobRequest.Merge(dst, src)
}
func (m *CancelJobRequest) XXX_Size() int {
	return xxx_messageInfo_CancelJobRequest.Size(m)
}
func (m *CancelJobRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CancelJobRequest.DiscardUnknown(m)
}

// xxx_messageInfo_CancelJobRequest caches marshal/unmarshal state for CancelJobRequest.
var xxx_messageInfo_CancelJobRequest proto.InternalMessageInfo
| |
| func (m *CancelJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *CancelJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *CancelJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
// A request to delete a job.
type DeleteJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// The XXX_ fields below are managed by the proto runtime; do not set them.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to the empty DeleteJobRequest.
func (m *DeleteJobRequest) Reset() { *m = DeleteJobRequest{} }
// String renders m using the proto compact text format.
func (m *DeleteJobRequest) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *DeleteJobRequest as a protobuf message type.
func (*DeleteJobRequest) ProtoMessage() {}
// Descriptor returns the compressed file descriptor bytes and this
// message's index within it.
func (*DeleteJobRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_jobs_2b4f6e28c2cc705f, []int{20}
}
// The XXX_ methods below are internal hooks used by the proto runtime.
func (m *DeleteJobRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteJobRequest.Unmarshal(m, b)
}
func (m *DeleteJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteJobRequest.Marshal(b, m, deterministic)
}
func (dst *DeleteJobRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteJobRequest.Merge(dst, src)
}
func (m *DeleteJobRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteJobRequest.Size(m)
}
func (m *DeleteJobRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteJobRequest.DiscardUnknown(m)
}

// xxx_messageInfo_DeleteJobRequest caches marshal/unmarshal state for DeleteJobRequest.
var xxx_messageInfo_DeleteJobRequest proto.InternalMessageInfo
| |
| func (m *DeleteJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *DeleteJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *DeleteJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
// init registers every message, map-entry and enum type generated from
// google/cloud/dataproc/v1beta2/jobs.proto with the proto type registry
// under its fully qualified proto name, so the runtime can resolve them
// by name (e.g. for Any unpacking and text/JSON marshaling).
func init() {
	proto.RegisterType((*LoggingConfig)(nil), "google.cloud.dataproc.v1beta2.LoggingConfig")
	proto.RegisterMapType((map[string]LoggingConfig_Level)(nil), "google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry")
	proto.RegisterType((*HadoopJob)(nil), "google.cloud.dataproc.v1beta2.HadoopJob")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry")
	proto.RegisterType((*SparkJob)(nil), "google.cloud.dataproc.v1beta2.SparkJob")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry")
	proto.RegisterType((*PySparkJob)(nil), "google.cloud.dataproc.v1beta2.PySparkJob")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry")
	proto.RegisterType((*QueryList)(nil), "google.cloud.dataproc.v1beta2.QueryList")
	proto.RegisterType((*HiveJob)(nil), "google.cloud.dataproc.v1beta2.HiveJob")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry")
	proto.RegisterType((*SparkSqlJob)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry")
	proto.RegisterType((*PigJob)(nil), "google.cloud.dataproc.v1beta2.PigJob")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry")
	proto.RegisterType((*JobPlacement)(nil), "google.cloud.dataproc.v1beta2.JobPlacement")
	proto.RegisterType((*JobStatus)(nil), "google.cloud.dataproc.v1beta2.JobStatus")
	proto.RegisterType((*JobReference)(nil), "google.cloud.dataproc.v1beta2.JobReference")
	proto.RegisterType((*YarnApplication)(nil), "google.cloud.dataproc.v1beta2.YarnApplication")
	proto.RegisterType((*Job)(nil), "google.cloud.dataproc.v1beta2.Job")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.Job.LabelsEntry")
	proto.RegisterType((*JobScheduling)(nil), "google.cloud.dataproc.v1beta2.JobScheduling")
	proto.RegisterType((*SubmitJobRequest)(nil), "google.cloud.dataproc.v1beta2.SubmitJobRequest")
	proto.RegisterType((*GetJobRequest)(nil), "google.cloud.dataproc.v1beta2.GetJobRequest")
	proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.dataproc.v1beta2.ListJobsRequest")
	proto.RegisterType((*UpdateJobRequest)(nil), "google.cloud.dataproc.v1beta2.UpdateJobRequest")
	proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.dataproc.v1beta2.ListJobsResponse")
	proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.dataproc.v1beta2.CancelJobRequest")
	proto.RegisterType((*DeleteJobRequest)(nil), "google.cloud.dataproc.v1beta2.DeleteJobRequest")
	proto.RegisterEnum("google.cloud.dataproc.v1beta2.LoggingConfig_Level", LoggingConfig_Level_name, LoggingConfig_Level_value)
	proto.RegisterEnum("google.cloud.dataproc.v1beta2.JobStatus_State", JobStatus_State_name, JobStatus_State_value)
	proto.RegisterEnum("google.cloud.dataproc.v1beta2.JobStatus_Substate", JobStatus_Substate_name, JobStatus_Substate_value)
	proto.RegisterEnum("google.cloud.dataproc.v1beta2.YarnApplication_State", YarnApplication_State_name, YarnApplication_State_value)
	proto.RegisterEnum("google.cloud.dataproc.v1beta2.ListJobsRequest_JobStateMatcher", ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value)
}
| |
// Reference imports to suppress errors if they are not otherwise used.
// These blank identifiers keep the context and grpc imports alive even if
// no generated code below happens to reference them directly.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
| |
// JobControllerClient is the client API for JobController service.
//
// The JobController service manages Cloud Dataproc jobs: submitting,
// fetching, listing, updating, canceling, and deleting them.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type JobControllerClient interface {
	// Submits a job to a cluster.
	SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Gets the resource representation for a job in a project.
	GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
	// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
	CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
| |
// jobControllerClient is the concrete JobControllerClient implementation;
// each method issues a unary RPC over the wrapped connection.
type jobControllerClient struct {
	cc *grpc.ClientConn
}
| |
| func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient { |
| return &jobControllerClient{cc} |
| } |
| |
| func (c *jobControllerClient) SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/GetJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { |
| out := new(ListJobsResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/ListJobs", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/CancelJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*empty.Empty, error) { |
| out := new(empty.Empty) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
// JobControllerServer is the server API for JobController service.
//
// Implementations are registered with a *grpc.Server via
// RegisterJobControllerServer.
type JobControllerServer interface {
	// Submits a job to a cluster.
	SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
	// Gets the resource representation for a job in a project.
	GetJob(context.Context, *GetJobRequest) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
	// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
	CancelJob(context.Context, *CancelJobRequest) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(context.Context, *DeleteJobRequest) (*empty.Empty, error)
}
| |
// RegisterJobControllerServer registers the JobController service
// implementation srv with the gRPC server s, wiring every method listed in
// _JobController_serviceDesc to its generated handler.
func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer) {
	s.RegisterService(&_JobController_serviceDesc, srv)
}
| |
| func _JobController_SubmitJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(SubmitJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).SubmitJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).SubmitJob(ctx, req.(*SubmitJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(GetJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).GetJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/GetJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).GetJob(ctx, req.(*GetJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ListJobsRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).ListJobs(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/ListJobs", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).ListJobs(ctx, req.(*ListJobsRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_UpdateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(UpdateJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).UpdateJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).UpdateJob(ctx, req.(*UpdateJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(CancelJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).CancelJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/CancelJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).CancelJob(ctx, req.(*CancelJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_DeleteJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(DeleteJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).DeleteJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).DeleteJob(ctx, req.(*DeleteJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
// _JobController_serviceDesc describes the JobController service to the grpc
// runtime: the fully-qualified service name, the server interface handlers
// dispatch to, and one MethodDesc per unary RPC. The service defines no
// streaming methods, so Streams is empty.
var _JobController_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.dataproc.v1beta2.JobController",
	HandlerType: (*JobControllerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "SubmitJob",
			Handler:    _JobController_SubmitJob_Handler,
		},
		{
			MethodName: "GetJob",
			Handler:    _JobController_GetJob_Handler,
		},
		{
			MethodName: "ListJobs",
			Handler:    _JobController_ListJobs_Handler,
		},
		{
			MethodName: "UpdateJob",
			Handler:    _JobController_UpdateJob_Handler,
		},
		{
			MethodName: "CancelJob",
			Handler:    _JobController_CancelJob_Handler,
		},
		{
			MethodName: "DeleteJob",
			Handler:    _JobController_DeleteJob_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "google/cloud/dataproc/v1beta2/jobs.proto",
}
| |
// init registers the compressed file descriptor for
// google/cloud/dataproc/v1beta2/jobs.proto with the proto package so it can
// be looked up by file name at runtime.
func init() {
	proto.RegisterFile("google/cloud/dataproc/v1beta2/jobs.proto", fileDescriptor_jobs_2b4f6e28c2cc705f)
}
| |
// fileDescriptor_jobs_2b4f6e28c2cc705f holds the gzip-compressed
// FileDescriptorProto for google/cloud/dataproc/v1beta2/jobs.proto.
// It is registered via proto.RegisterFile and must never be edited by hand.
var fileDescriptor_jobs_2b4f6e28c2cc705f = []byte{
	// 2306 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcd, 0x73, 0xdb, 0xc6,
	0x15, 0x17, 0xbf, 0x89, 0x47, 0x7d, 0x40, 0x1b, 0x3b, 0xe5, 0x30, 0xcd, 0x44, 0xc1, 0x4c, 0x52,
	0x35, 0xe9, 0x90, 0x13, 0xd6, 0x75, 0x1c, 0xbb, 0x69, 0x4c, 0x91, 0xa0, 0x49, 0x95, 0xa6, 0x18,
	0x90, 0xb4, 0x9b, 0x74, 0x3a, 0x08, 0x48, 0xae, 0x28, 0x48, 0x20, 0x00, 0xef, 0x02, 0xaa, 0x19,
	0x8f, 0x2f, 0xbd, 0xf4, 0xd0, 0x63, 0x2f, 0x9d, 0xe9, 0x4c, 0x66, 0x7a, 0x6b, 0x66, 0x7a, 0xe9,
	0xb5, 0xff, 0x40, 0xa7, 0x97, 0xf6, 0x90, 0x3f, 0xa1, 0x3d, 0xf4, 0xd8, 0x53, 0xcf, 0x9d, 0xdd,
	0x05, 0x28, 0x92, 0x92, 0x4d, 0xc8, 0xea, 0x97, 0x73, 0x12, 0xf0, 0xbe, 0xf6, 0xed, 0xfe, 0x7e,
	0xfb, 0xf6, 0x2d, 0x28, 0xd8, 0x1d, 0x3b, 0xce, 0xd8, 0xc2, 0xa5, 0xa1, 0xe5, 0xf8, 0xa3, 0xd2,
	0xc8, 0xf0, 0x0c, 0x97, 0x38, 0xc3, 0xd2, 0xe9, 0x7b, 0x03, 0xec, 0x19, 0xe5, 0xd2, 0xb1, 0x33,
	0xa0, 0x45, 0x97, 0x38, 0x9e, 0x83, 0x5e, 0x17, 0x96, 0x45, 0x6e, 0x59, 0x0c, 0x2d, 0x8b, 0x81,
	0x65, 0xe1, 0x9b, 0x41, 0x20, 0xc3, 0x35, 0x4b, 0x86, 0x6d, 0x3b, 0x9e, 0xe1, 0x99, 0x8e, 0x1d,
	0x38, 0x17, 0x5e, 0x0b, 0xb4, 0xfc, 0x6d, 0xe0, 0x1f, 0x96, 0xf0, 0xc4, 0xf5, 0xa6, 0x81, 0x72,
	0x67, 0x59, 0x79, 0x68, 0x62, 0x6b, 0xa4, 0x4f, 0x0c, 0x7a, 0x12, 0x58, 0xbc, 0xb1, 0x6c, 0xe1,
	0x99, 0x13, 0x4c, 0x3d, 0x63, 0xe2, 0x0a, 0x03, 0xe5, 0xef, 0x71, 0xd8, 0x68, 0x39, 0xe3, 0xb1,
	0x69, 0x8f, 0xab, 0x8e, 0x7d, 0x68, 0x8e, 0xd1, 0x04, 0xb6, 0x47, 0xc4, 0x3c, 0xc5, 0x44, 0xb7,
	0x9c, 0xb1, 0x6e, 0xe1, 0x53, 0x6c, 0xd1, 0x7c, 0x7c, 0x27, 0xb1, 0x9b, 0x2b, 0x57, 0x8a, 0xcf,
	0x9d, 0x4a, 0x71, 0x21, 0x50, 0xb1, 0xc6, 0xa3, 0xb4, 0x9c, 0x71, 0x8b, 0xc7, 0x50, 0x6d, 0x8f,
	0x4c, 0xb5, 0xad, 0xd1, 0xa2, 0xb4, 0x70, 0x0a, 0xd7, 0x2e, 0x32, 0x44, 0x32, 0x24, 0x4e, 0xf0,
	0x34, 0x1f, 0xdb, 0x89, 0xed, 0x4a, 0x1a, 0x7b, 0x44, 0x0d, 0x48, 0x9d, 0x1a, 0x96, 0x8f, 0xf3,
	0xf1, 0x9d, 0xd8, 0xee, 0x66, 0xb9, 0x7c, 0xa9, 0x64, 0x78, 0x68, 0x4d, 0x04, 0xb8, 0x1d, 0xbf,
	0x15, 0x53, 0x5c, 0x48, 0x71, 0x19, 0xba, 0x0e, 0xdb, 0x2d, 0xf5, 0x81, 0xda, 0xd2, 0xfb, 0xed,
	0x6e, 0x47, 0xad, 0x36, 0xeb, 0x4d, 0xb5, 0x26, 0xaf, 0xa1, 0x0c, 0x24, 0x2a, 0xad, 0x96, 0x1c,
	0x43, 0x12, 0xa4, 0x7a, 0x5a, 0xa5, 0xaa, 0xca, 0x71, 0xf6, 0x58, 0x53, 0xf7, 0xfa, 0xf7, 0xe4,
	0x04, 0xca, 0x42, 0xb2, 0xd9, 0xae, 0x1f, 0xc8, 0x49, 0xf6, 0xf4, 0xb0, 0xa2, 0xb5, 0xe5, 0x14,
	0x53, 0xab, 0x9a, 0x76, 0xa0, 0xc9, 0x69, 0xf6, 0x58, 0xaf, 0xf4, 0x2a, 0x2d, 0x39, 0xc3, 0x02,
	0x1d, 0xd4, 0xeb, 0x72, 0x56, 0xf9, 0x53, 0x02, 0xa4, 0x86, 0x31, 0x72, 0x1c, 0x77, 0xdf, 0x19,
	0xa0, 0x77, 0x61, 0x7b, 0x62, 0x98, 0xb6, 0x7e, 0x6c, 0x10, 0xfd, 0xd0, 0xb4, 0xb0, 0xee, 0x13,
	0x53, 0xcc, 0xb6, 0xb1, 0xa6, 0x6d, 0x32, 0xd5, 0xbe, 0x41, 0xea, 0xa6, 0x85, 0xfb, 0xc4, 0x44,
	0x6f, 0x00, 0x70, 0xe3, 0xa1, 0x65, 0x50, 0xca, 0xe7, 0xcf, 0xac, 0x24, 0x26, 0xab, 0x32, 0x11,
	0x42, 0x90, 0x34, 0xc8, 0x98, 0xe6, 0x13, 0x3b, 0x89, 0x5d, 0x49, 0xe3, 0xcf, 0x48, 0x81, 0x8d,
	0xf9, 0xe0, 0x34, 0x9f, 0xe4, 0xca, 0xdc, 0xf1, 0x2c, 0x2e, 0x45, 0xaf, 0x81, 0x74, 0xa6, 0x4f,
	0x71, 0x7d, 0xf6, 0x30, 0x54, 0xbe, 0x09, 0xeb, 0x06, 0x19, 0x1e, 0x99, 0xa7, 0x81, 0x3e, 0x2d,
	0xfc, 0x03, 0x19, 0x37, 0xf9, 0x11, 0x80, 0x4b, 0x1c, 0x17, 0x13, 0xcf, 0xc4, 0x34, 0x9f, 0xe1,
	0x2c, 0xb9, 0xb5, 0x02, 0x98, 0xd9, 0x1a, 0x14, 0x3b, 0x33, 0x57, 0x41, 0x8e, 0xb9, 0x58, 0xa8,
	0x0b, 0x9b, 0x96, 0x40, 0x50, 0x1f, 0x72, 0x08, 0xf3, 0xd9, 0x9d, 0xd8, 0x6e, 0xae, 0xfc, 0x9d,
	0xcb, 0xc0, 0xae, 0x6d, 0x58, 0xf3, 0xaf, 0x85, 0x0f, 0x61, 0x6b, 0x69, 0xcc, 0x0b, 0x78, 0x76,
	0x6d, 0x9e, 0x67, 0xd2, 0x1c, 0x67, 0xf6, 0xb2, 0x90, 0x16, 0xf4, 0x55, 0xfe, 0x98, 0x80, 0x6c,
	0xd7, 0x35, 0xc8, 0xc9, 0xd7, 0x07, 0xca, 0x87, 0x17, 0x40, 0xf9, 0xfe, 0x8a, 0xc5, 0x0e, 0x97,
	0xe0, 0x25, 0x46, 0xf2, 0xcf, 0x09, 0x80, 0xce, 0x74, 0x86, 0x65, 0x09, 0xae, 0x71, 0x78, 0xdc,
	0xa9, 0x77, 0xe4, 0xd8, 0x4b, 0x70, 0x6a, 0x1c, 0xe7, 0x0e, 0x57, 0x85, 0x78, 0x86, 0x70, 0xc5,
	0xe7, 0xe0, 0xda, 0x05, 0x79, 0xc9, 0x3f, 0x84, 0x73, 0xd3, 0x9d, 0x77, 0xfe, 0xef, 0x00, 0xfb,
	0xc9, 0x05, 0xc0, 0x7e, 0xb0, 0x62, 0xed, 0xcf, 0x56, 0xe4, 0x65, 0x83, 0x56, 0x79, 0x0b, 0xa4,
	0x8f, 0x7d, 0x4c, 0xa6, 0x2d, 0x93, 0x7a, 0x28, 0x0f, 0x99, 0x47, 0x3e, 0x26, 0x6c, 0xe2, 0x31,
	0xbe, 0x32, 0xe1, 0xab, 0xf2, 0x45, 0x12, 0x32, 0x0d, 0xf3, 0x14, 0x33, 0xd0, 0xdf, 0x86, 0x4d,
	0x26, 0x9e, 0x9e, 0xdf, 0xbd, 0xeb, 0x5c, 0x1e, 0x62, 0xdd, 0x04, 0x10, 0x76, 0x96, 0x49, 0x3d,
	0x3e, 0x72, 0xae, 0xbc, 0xbb, 0x62, 0xaa, 0xb3, 0x5c, 0xd8, 0x2e, 0x7f, 0x34, 0x4b, 0xac, 0x08,
	0xaf, 0x0c, 0x1d, 0xdb, 0x33, 0x6d, 0x1f, 0xeb, 0x8c, 0x27, 0x86, 0x69, 0xf9, 0x04, 0xe7, 0x13,
	0x3b, 0xb1, 0xdd, 0xac, 0xb6, 0x1d, 0xaa, 0x0e, 0xec, 0xba, 0x50, 0xa0, 0x43, 0x90, 0xe9, 0x90,
	0x98, 0xae, 0xa7, 0x9f, 0x1a, 0xc4, 0x34, 0x06, 0x16, 0x16, 0x5c, 0xc9, 0x95, 0xef, 0xac, 0x2a,
	0xb7, 0x62, 0x92, 0xc5, 0x2e, 0x77, 0x7f, 0x10, 0x7a, 0x07, 0xc7, 0x31, 0x5d, 0x94, 0xa2, 0x07,
	0x0b, 0x64, 0x49, 0xf1, 0x11, 0x6e, 0x46, 0x1c, 0xe1, 0x79, 0x4c, 0x39, 0x47, 0xf4, 0xf4, 0x39,
	0xa2, 0x17, 0xf6, 0xe0, 0xda, 0x45, 0x49, 0x5e, 0x06, 0xfd, 0xab, 0xd6, 0x05, 0x69, 0xc6, 0x17,
	0xe5, 0x2f, 0x49, 0xc8, 0xf1, 0x4d, 0xd0, 0x7d, 0x64, 0xfd, 0x8f, 0x48, 0x72, 0x7c, 0x01, 0xe8,
	0x09, 0x0e, 0xc9, 0x47, 0x51, 0x0a, 0xb3, 0x48, 0x3c, 0x22, 0xf0, 0x9f, 0x2e, 0x00, 0x2f, 0xa8,
	0x75, 0xfb, 0x12, 0xa3, 0x5c, 0x0a, 0xfc, 0x5b, 0xe7, 0xab, 0xdc, 0xf9, 0x52, 0x92, 0xbe, 0x7a,
	0x29, 0xf9, 0xff, 0x62, 0xd4, 0x3f, 0x92, 0x90, 0xee, 0x98, 0xe3, 0x97, 0xa4, 0xe2, 0xe0, 0x67,
	0x56, 0x9c, 0x55, 0xb4, 0x10, 0x73, 0x8c, 0xc8, 0xbb, 0xfe, 0x05, 0x05, 0xe7, 0x7b, 0xd1, 0x06,
	0xb8, 0x62, 0xbd, 0xb9, 0x80, 0x72, 0x99, 0xaf, 0x1b, 0xe5, 0x7a, 0xb0, 0xbe, 0xef, 0x0c, 0x3a,
	0x96, 0x31, 0xc4, 0x13, 0x6c, 0x7b, 0xac, 0x5d, 0x18, 0x5a, 0x3e, 0xf5, 0x30, 0xd1, 0x6d, 0x63,
	0x82, 0x83, 0x78, 0xb9, 0x40, 0xd6, 0x36, 0x26, 0x78, 0xde, 0xc4, 0xf7, 0xcd, 0x51, 0x10, 0x3e,
	0x34, 0xe9, 0xfb, 0xe6, 0x48, 0xf9, 0x67, 0x02, 0xa4, 0x7d, 0x67, 0xd0, 0xf5, 0x0c, 0xcf, 0xa7,
	0xa8, 0x06, 0x29, 0xea, 0x19, 0x9e, 0x08, 0xb6, 0x59, 0x2e, 0xae, 0x58, 0xbd, 0x99, 0x63, 0x91,
	0xfd, 0xc1, 0x9a, 0x70, 0x66, 0x27, 0xf5, 0x08, 0x7b, 0x86, 0x69, 0x05, 0x4d, 0xb1, 0x16, 0xbe,
	0xa2, 0x1a, 0xc8, 0xdc, 0x44, 0xa7, 0x9e, 0x41, 0x3c, 0x9d, 0xdd, 0x60, 0x83, 0xda, 0x50, 0x08,
	0x87, 0x0a, 0xaf, 0xb7, 0xc5, 0x5e, 0x78, 0xbd, 0xd5, 0x36, 0xb9, 0x4f, 0x97, 0xb9, 0x30, 0x21,
	0xba, 0x0f, 0x59, 0xea, 0x0f, 0x44, 0xa2, 0x19, 0x9e, 0xe8, 0x7b, 0xd1, 0x13, 0x0d, 0x1c, 0xb5,
	0x59, 0x08, 0xe5, 0xcb, 0x18, 0xa4, 0x78, 0xfe, 0xec, 0xfe, 0xd8, 0xed, 0x55, 0x7a, 0xea, 0xd2,
	0xfd, 0x31, 0x07, 0x99, 0x8e, 0xda, 0xae, 0x35, 0xdb, 0xf7, 0xe4, 0x18, 0xda, 0x04, 0xe8, 0xaa,
	0xbd, 0x7e, 0x47, 0xaf, 0x1d, 0xb4, 0x55, 0x39, 0xcb, 0x94, 0x5a, 0xbf, 0xdd, 0x66, 0xca, 0x38,
	0x42, 0xb0, 0x59, 0xad, 0xb4, 0xab, 0x6a, 0x4b, 0x0f, 0x1d, 0x12, 0x73, 0xb2, 0x6e, 0xaf, 0xa2,
	0xf5, 0xd4, 0x9a, 0x9c, 0x41, 0x1b, 0x20, 0x09, 0x59, 0x4b, 0xad, 0x89, 0x7b, 0x27, 0x8f, 0xb6,
	0x70, 0xef, 0x7c, 0x05, 0xb6, 0x2a, 0xbd, 0x9e, 0x7a, 0xbf, 0xd3, 0xd3, 0xeb, 0x95, 0x66, 0xab,
	0xaf, 0xa9, 0xb2, 0xa4, 0x34, 0x20, 0x1b, 0xce, 0x00, 0x6d, 0x41, 0x6e, 0x31, 0xcf, 0x0d, 0x90,
	0xba, 0xfd, 0xbd, 0xfb, 0xcd, 0x1e, 0x1b, 0x24, 0x86, 0x00, 0xd2, 0x1f, 0xf7, 0xd5, 0xbe, 0x5a,
	0x93, 0xe3, 0x48, 0x86, 0xf5, 0x6e, 0xaf, 0xd2, 0x52, 0x59, 0x0e, 0xbd, 0x7e, 0x57, 0x4e, 0x28,
	0x35, 0x4e, 0x27, 0x0d, 0x1f, 0x62, 0x82, 0xed, 0x21, 0x46, 0xaf, 0xf3, 0xcd, 0x7b, 0x8c, 0x87,
	0x9e, 0x6e, 0x8e, 0x02, 0x32, 0x49, 0x81, 0xa4, 0x39, 0x42, 0xd7, 0x21, 0x7d, 0xec, 0x0c, 0xf4,
	0x19, 0x89, 0x52, 0xc7, 0xce, 0xa0, 0x39, 0x52, 0xfe, 0x10, 0x87, 0xad, 0x4f, 0x0c, 0x62, 0x57,
	0x5c, 0xd7, 0x32, 0x87, 0xfc, 0x73, 0x07, 0x6b, 0xa3, 0xe7, 0x08, 0xc9, 0x9f, 0xd1, 0x7e, 0x48,
	0x2c, 0x71, 0xe1, 0xbf, 0xb1, 0x02, 0xaf, 0xa5, 0x90, 0x8b, 0xf4, 0x2a, 0x40, 0xd6, 0x25, 0xce,
	0x98, 0x60, 0x4a, 0x79, 0xc9, 0x8b, 0x6b, 0xb3, 0x77, 0xc6, 0x78, 0x8f, 0x18, 0xc3, 0x13, 0x56,
	0x08, 0x7c, 0x62, 0xe5, 0x93, 0x82, 0xf1, 0xa1, 0xac, 0x4f, 0x2c, 0xe5, 0xe7, 0xab, 0xe0, 0xce,
	0x40, 0xa2, 0xad, 0x3e, 0x14, 0x50, 0xb7, 0xd5, 0x87, 0x7a, 0xb7, 0xf2, 0x40, 0xa0, 0xbb, 0xb0,
	0xbe, 0x09, 0xb4, 0x0e, 0xd9, 0x4a, 0xb5, 0xaa, 0x76, 0x7a, 0x1c, 0xc3, 0x39, 0x1e, 0xa4, 0x98,
	0xaa, 0xde, 0x6c, 0x37, 0xbb, 0x0d, 0xb5, 0x26, 0xa7, 0x19, 0x10, 0x0c, 0x41, 0x8e, 0x3c, 0x40,
	0xfa, 0x87, 0x4d, 0x0e, 0x7b, 0x56, 0xf9, 0x95, 0x04, 0x09, 0x76, 0x82, 0x34, 0x41, 0x22, 0x21,
	0x0e, 0x7c, 0xd5, 0x72, 0xe5, 0x77, 0x57, 0x13, 0x7a, 0x06, 0x9d, 0x76, 0xe6, 0xcd, 0x42, 0xb9,
	0x61, 0x85, 0x08, 0xce, 0x98, 0x08, 0xa1, 0x66, 0x45, 0x45, 0x3b, 0xf3, 0x66, 0xe7, 0xd5, 0x11,
	0xbf, 0xde, 0xeb, 0xc7, 0xce, 0x80, 0x2f, 0xf4, 0xea, 0xf3, 0x6a, 0xf6, 0x3d, 0x80, 0x9d, 0x57,
	0x47, 0xb3, 0x0f, 0x24, 0x75, 0x90, 0x28, 0xeb, 0x2f, 0x78, 0xa4, 0x24, 0x8f, 0xf4, 0xad, 0x88,
	0xd7, 0xd1, 0xc6, 0x9a, 0x96, 0xa5, 0xe1, 0x8d, 0xae, 0x05, 0x39, 0x77, 0x7a, 0x16, 0x29, 0xc5,
	0x23, 0x7d, 0x3b, 0xf2, 0xfd, 0xa7, 0xb1, 0xa6, 0x41, 0xe0, 0xcf, 0xa2, 0x55, 0x21, 0xcb, 0x2f,
	0x5b, 0x2c, 0x94, 0x28, 0x42, 0x6f, 0x47, 0xeb, 0x8e, 0x1b, 0x6b, 0x5a, 0xe6, 0x28, 0xb8, 0x6f,
	0xdc, 0x85, 0x8c, 0x6b, 0x8e, 0x79, 0x0c, 0x71, 0xe2, 0xbc, 0x15, 0xe9, 0xc0, 0x6b, 0xac, 0x69,
	0x69, 0x57, 0xf4, 0x0f, 0x1d, 0xd8, 0x10, 0x53, 0xa2, 0x8f, 0x2c, 0x1e, 0x67, 0x9d, 0xc7, 0x79,
	0x27, 0x7a, 0xc3, 0xd6, 0x58, 0xd3, 0x72, 0x74, 0xae, 0xbd, 0xbd, 0x0b, 0x69, 0xca, 0xab, 0x5d,
	0x70, 0x85, 0xdb, 0x8d, 0x5a, 0x1d, 0xb5, 0xc0, 0x0f, 0x1d, 0xc0, 0xa6, 0x78, 0xd2, 0x8f, 0x4c,
	0xea, 0x39, 0x64, 0x9a, 0xdf, 0xe0, 0xa7, 0x79, 0xf4, 0x48, 0x1b, 0xc2, 0xbf, 0x21, 0xdc, 0xd1,
	0x8f, 0x61, 0x7b, 0x6a, 0x10, 0x5b, 0x37, 0xce, 0x36, 0x35, 0xcd, 0x4b, 0x3c, 0x66, 0xf1, 0x72,
	0xb5, 0x40, 0x93, 0xa7, 0x8b, 0x02, 0x8a, 0xee, 0x40, 0x21, 0xf8, 0xcc, 0xe9, 0xf8, 0x9e, 0xeb,
	0x7b, 0x3a, 0xc1, 0xd4, 0xf1, 0xc9, 0x50, 0x74, 0x63, 0xdb, 0xbc, 0x04, 0x7c, 0x43, 0x58, 0x1c,
	0x70, 0x03, 0x2d, 0xd0, 0xb3, 0xb6, 0xec, 0x7d, 0xc8, 0x07, 0xce, 0xac, 0x6f, 0x22, 0x8e, 0xc5,
	0x1b, 0x0d, 0xca, 0x5d, 0xb7, 0xb8, 0xeb, 0x75, 0xa1, 0xaf, 0x0a, 0x35, 0x6b, 0x39, 0x28, 0x73,
	0xac, 0x43, 0xda, 0x32, 0x06, 0xd8, 0xa2, 0x79, 0x14, 0x69, 0x1e, 0xac, 0xcd, 0x69, 0x71, 0x07,
	0xd1, 0xe2, 0x04, 0xde, 0xa8, 0x05, 0x40, 0x87, 0x47, 0x78, 0xe4, 0x5b, 0xa6, 0x3d, 0xce, 0x5f,
	0x8b, 0xd4, 0xb6, 0xb0, 0x75, 0x9e, 0xf9, 0x68, 0x73, 0xfe, 0x85, 0x0f, 0x20, 0x37, 0x37, 0xc8,
	0xa5, 0x7a, 0x0d, 0x80, 0xac, 0x37, 0x75, 0xf9, 0x7e, 0x50, 0xf6, 0x60, 0x63, 0x61, 0x0c, 0xf4,
	0x1e, 0x5c, 0x9f, 0x18, 0x8f, 0xc3, 0x56, 0x93, 0xea, 0x2e, 0x26, 0xfa, 0x91, 0xe3, 0x13, 0x1e,
	0x3a, 0xa5, 0xa1, 0x89, 0xf1, 0x38, 0xe8, 0x36, 0x69, 0x07, 0x93, 0x86, 0xe3, 0x13, 0xe5, 0x8b,
	0x18, 0xc8, 0x5d, 0x7f, 0x30, 0x31, 0x3d, 0x5e, 0xad, 0x1e, 0xf9, 0x98, 0x7a, 0xab, 0x8e, 0x99,
	0x57, 0x21, 0x4d, 0xf0, 0xd8, 0x74, 0x6c, 0x5e, 0x70, 0x24, 0x2d, 0x78, 0x43, 0x37, 0x20, 0xc1,
	0xb6, 0x86, 0xa8, 0x68, 0x4a, 0x84, 0xe2, 0xc8, 0xcc, 0xd9, 0x60, 0x44, 0x8c, 0xcb, 0x06, 0x13,
	0x67, 0x81, 0x14, 0x48, 0x9a, 0x23, 0xe5, 0x27, 0xb0, 0x71, 0x0f, 0xff, 0x1b, 0x92, 0x7b, 0xc6,
	0xd9, 0xf8, 0xd7, 0x38, 0x6c, 0xb1, 0x76, 0x7d, 0xdf, 0x19, 0xd0, 0x4b, 0x8f, 0x90, 0x5e, 0x18,
	0xe1, 0x35, 0x90, 0x5c, 0x63, 0x8c, 0x75, 0x6a, 0x7e, 0x2e, 0x80, 0x4b, 0x69, 0x59, 0x26, 0xe8,
	0x9a, 0x9f, 0x8b, 0x93, 0x9b, 0x29, 0x3d, 0xe7, 0x04, 0x87, 0xa9, 0x71, 0xf3, 0x1e, 0x13, 0x9c,
	0xeb, 0x13, 0x93, 0xe7, 0xfb, 0xc4, 0x63, 0xd8, 0x66, 0x13, 0x10, 0xad, 0xd9, 0xc4, 0xf0, 0x86,
	0x47, 0x98, 0xf0, 0xea, 0xba, 0x59, 0xfe, 0xc1, 0xaa, 0x06, 0x7a, 0x71, 0x82, 0x61, 0x05, 0xc0,
	0xf7, 0x45, 0x14, 0x6d, 0xeb, 0x78, 0x51, 0xc0, 0xa6, 0x78, 0x68, 0x5a, 0x1e, 0x26, 0xbc, 0x5e,
	0x4a, 0x5a, 0xf0, 0xa6, 0xdc, 0x84, 0xad, 0x25, 0xdf, 0xf0, 0xbb, 0xfd, 0x1a, 0x3b, 0x34, 0x2b,
	0xd5, 0x5e, 0xf3, 0x81, 0x1a, 0x1c, 0xca, 0x07, 0x6d, 0x3d, 0x78, 0x8f, 0x2b, 0x5f, 0xc5, 0x40,
	0xee, 0xbb, 0x23, 0xc3, 0xc3, 0x2f, 0x02, 0x64, 0xfc, 0x19, 0x40, 0x26, 0xe6, 0x80, 0x0c, 0xc9,
	0x97, 0xbc, 0x1c, 0xf9, 0xee, 0x40, 0xce, 0xe7, 0x79, 0xf1, 0x1f, 0x71, 0x82, 0xc3, 0xea, 0x7c,
	0x9b, 0x5b, 0x37, 0xb1, 0x35, 0xba, 0x6f, 0xd0, 0x13, 0x0d, 0x84, 0x39, 0x7b, 0x56, 0x08, 0xc8,
	0x67, 0x2b, 0x4b, 0x5d, 0xc7, 0xa6, 0x18, 0xdd, 0x84, 0xe4, 0xb1, 0x33, 0x10, 0x5f, 0xbf, 0xa2,
	0xe5, 0xc1, 0xed, 0xd1, 0xdb, 0xb0, 0x65, 0xe3, 0xc7, 0x9e, 0x3e, 0x47, 0x12, 0x31, 0xed, 0x0d,
	0x26, 0xee, 0x84, 0x44, 0x51, 0x3e, 0x03, 0xb9, 0x6a, 0xd8, 0x43, 0x6c, 0xfd, 0xc7, 0x76, 0xc4,
	0x67, 0x20, 0xd7, 0xb0, 0x85, 0x5f, 0x0c, 0xaa, 0x28, 0x23, 0x94, 0x7f, 0x91, 0xe5, 0x85, 0x2b,
	0xa8, 0xd5, 0x16, 0x26, 0xe8, 0xb7, 0x31, 0x90, 0x66, 0x55, 0x08, 0x95, 0x56, 0x9d, 0xaa, 0x4b,
	0xf5, 0xaa, 0x10, 0x61, 0x99, 0x95, 0xfa, 0xcf, 0xbe, 0xfa, 0xdb, 0x2f, 0xe3, 0x77, 0x95, 0x3b,
	0xb3, 0x9f, 0x0c, 0x83, 0xfc, 0x69, 0xe9, 0xc9, 0xd9, 0xdc, 0x9e, 0x96, 0x44, 0xea, 0xb4, 0xf4,
	0x44, 0x3c, 0x3c, 0xe5, 0xbf, 0x2c, 0xde, 0xa6, 0x7c, 0xc8, 0xdb, 0xb1, 0x77, 0xd0, 0x6f, 0x62,
	0x90, 0x16, 0x05, 0x09, 0xad, 0x3a, 0x00, 0x16, 0xea, 0x56, 0xa4, 0x24, 0x55, 0x9e, 0xe4, 0x47,
	0xe8, 0xc3, 0x17, 0x49, 0xb2, 0xf4, 0x44, 0x2c, 0xf6, 0x53, 0xf4, 0x65, 0x0c, 0xb2, 0x21, 0x33,
	0x51, 0xf1, 0x72, 0xc5, 0xa1, 0x50, 0x8a, 0x6c, 0x2f, 0x28, 0xaf, 0x7c, 0x9f, 0x27, 0x7d, 0x13,
	0xdd, 0x78, 0x91, 0xa4, 0xd1, 0xef, 0x62, 0x20, 0xcd, 0x4a, 0xc3, 0x4a, 0xe8, 0x97, 0x8b, 0x48,
	0xa4, 0x55, 0xdd, 0xe7, 0x09, 0xd6, 0xca, 0x57, 0x5b, 0xd5, 0xdb, 0xbc, 0x60, 0xfc, 0x3e, 0x06,
	0xd2, 0x6c, 0x03, 0xae, 0x4c, 0x77, 0x79, 0xab, 0x46, 0x4a, 0xf7, 0x80, 0xa7, 0xdb, 0x54, 0x6a,
	0x57, 0x4b, 0x77, 0xc8, 0xc7, 0x66, 0x94, 0xfd, 0x75, 0x0c, 0xa4, 0xd9, 0x96, 0x5e, 0x99, 0xf3,
	0xf2, 0xe6, 0x2f, 0xbc, 0x7a, 0xae, 0x1c, 0xaa, 0x13, 0xd7, 0x9b, 0x86, 0x64, 0x7d, 0xe7, 0x6a,
	0xcb, 0xba, 0xf7, 0x53, 0x78, 0x73, 0xe8, 0x4c, 0x9e, 0x9f, 0xd4, 0x9e, 0xc4, 0x18, 0xd7, 0x61,
	0xe3, 0x77, 0x62, 0x9f, 0xaa, 0x81, 0xed, 0xd8, 0xb1, 0x0c, 0x7b, 0x5c, 0x74, 0xc8, 0xb8, 0x34,
	0xc6, 0x36, 0xcf, 0xae, 0x24, 0x54, 0x86, 0x6b, 0xd2, 0x67, 0xfc, 0xa7, 0xc0, 0x9d, 0x50, 0x30,
	0x48, 0x73, 0x8f, 0xef, 0xfe, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x78, 0x17, 0x92, 0x5a, 0x20,
	0x00, 0x00,
}